Reading the Caffe Source (2): caffe.proto (Part 2)

message ContrastiveLossParameter {
  // margin for dissimilar pair
  optional float margin = 1 [default = 1.0];
  // The first implementation of this cost did not exactly match the cost of
  // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2.
  // legacy_version = false (the default) uses (margin - d)^2 as proposed in the
  // Hadsell paper. New models should probably use this version.
  // legacy_version = true uses (margin - d^2). This is kept to support /
  // reproduce existing models and results
  optional bool legacy_version = 2 [default = false];
}

message ConvolutionParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms

  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in all spatial dimensions, or once per spatial dimension.
  repeated uint32 pad = 3; // The padding size; defaults to 0
  repeated uint32 kernel_size = 4; // The kernel size
  repeated uint32 stride = 6; // The stride; defaults to 1
  // Factor used to dilate the kernel, (implicitly) zero-filling the resulting
  // holes. (Kernel dilation is sometimes referred to by its use in the
  // algorithme à trous from Holschneider et al. 1987.)
  repeated uint32 dilation = 18; // The dilation; defaults to 1

  // For 2D convolution only, the *_h and *_w versions may also be used to
  // specify both spatial dimensions.
  optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only)
  optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only)
  optional uint32 kernel_h = 11; // The kernel height (2D only)
  optional uint32 kernel_w = 12; // The kernel width (2D only)
  optional uint32 stride_h = 13; // The stride height (2D only)
  optional uint32 stride_w = 14; // The stride width (2D only)

  optional uint32 group = 5 [default = 1]; // The group size for group conv

  optional FillerParameter weight_filler = 7; // The filler for the weight
  optional FillerParameter bias_filler = 8; // The filler for the bias
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 15 [default = DEFAULT];

  // The axis to interpret as "channels" when performing convolution.
  // Preceding dimensions are treated as independent inputs;
  // succeeding dimensions are treated as "spatial".
  // With (N, C, H, W) inputs, and axis == 1 (the default), we perform
  // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for
  // groups g>1) filters across the spatial axes (H, W) of the input.
  // With (N, C, D, H, W) inputs, and axis == 1, we perform
  // N independent 3D convolutions, sliding (C/g)-channels
  // filters across the spatial axes (D, H, W) of the input.
  optional int32 axis = 16 [default = 1];

  // Whether to force use of the general ND convolution, even if a specific
  // implementation for blobs of the appropriate number of spatial dimensions
  // is available. (Currently, there is only a 2D-specific convolution
  // implementation; for input blobs with num_axes != 2, this option is
  // ignored and the ND implementation will be used.)
  optional bool force_nd_im2col = 17 [default = false];
}
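
As a concrete illustration, here is a minimal prototxt sketch of a Convolution layer using these fields (the layer/blob names "conv1"/"data" and the filler settings are illustrative choices, not mandated by the proto):

  layer {
    name: "conv1"
    type: "Convolution"
    bottom: "data"
    top: "conv1"
    convolution_param {
      num_output: 96    # number of filters, i.e. output channels
      kernel_size: 11   # one value applies to all spatial dims (11 x 11 here)
      stride: 4
      pad: 0
      weight_filler { type: "gaussian" std: 0.01 }
      bias_filler { type: "constant" value: 0 }
    }
  }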

message DataParameter {
  enum DB {
    LEVELDB = 0;
    LMDB = 1;
  }
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // The rand_skip variable is for the data layer to skip a few data points
  // to avoid all asynchronous sgd clients to start at the same point. The skip
  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
  // be larger than the number of keys in the database.
  // DEPRECATED. Each solver accesses a different subset of the database.
  optional uint32 rand_skip = 7 [default = 0];
  optional DB backend = 8 [default = LEVELDB];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  // Force the encoded image to have 3 color channels
  optional bool force_encoded_color = 9 [default = false];
  // Prefetch queue (Number of batches to prefetch to host memory, increase if
  // data access bandwidth varies).
  optional uint32 prefetch = 10 [default = 4];
}
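
A minimal prototxt sketch of a Data layer reading an LMDB (the source path is illustrative). Since scale/mean_file/crop_size/mirror are deprecated here, preprocessing is normally configured through transform_param instead:

  layer {
    name: "data"
    type: "Data"
    top: "data"
    top: "label"
    data_param {
      source: "examples/mnist/mnist_train_lmdb"  # illustrative path
      backend: LMDB
      batch_size: 64
    }
  }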

message DropoutParameter {
  optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio
  optional bool scale_train = 2 [default = true];  // scale train or test phase
}
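
Dropout is usually applied in place (bottom and top share a name); a sketch on a hypothetical "fc6" blob:

  layer {
    name: "drop6"
    type: "Dropout"
    bottom: "fc6"
    top: "fc6"    # in-place operation
    dropout_param { dropout_ratio: 0.5 }
  }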

// DummyDataLayer fills any number of arbitrarily shaped blobs with random
// (or constant) data generated by "Fillers" (see "message FillerParameter").
message DummyDataParameter {
  // This layer produces N >= 1 top blobs.  DummyDataParameter must specify 1 or N
  // shape fields, and 0, 1 or N data_fillers.
  // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used.
  // If 1 data_filler is specified, it is applied to all top blobs.  If N are
  // specified, the ith is applied to the ith top blob.
  repeated FillerParameter data_filler = 1;
  repeated BlobShape shape = 6;

  // 4D dimensions -- deprecated.  Use "shape" instead.
  repeated uint32 num = 2;
  repeated uint32 channels = 3;
  repeated uint32 height = 4;
  repeated uint32 width = 5;
}

message EltwiseParameter {
  enum EltwiseOp {
    PROD = 0;
    SUM = 1;
    MAX = 2;
  }
  optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation
  repeated float coeff = 2; // blob-wise coefficient for SUM operation

  // Whether to use an asymptotically slower (for >2 inputs) but stabler method
  // of computing the gradient for the PROD operation. (No effect for SUM op.)
  optional bool stable_prod_grad = 3 [default = true];
}
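
For example, with two bottoms and per-blob coefficients, a SUM Eltwise layer can compute a difference; a sketch with hypothetical blob names "branch_a"/"branch_b":

  layer {
    name: "diff"
    type: "Eltwise"
    bottom: "branch_a"
    bottom: "branch_b"
    top: "diff"
    eltwise_param {
      operation: SUM
      coeff: 1.0    # one coefficient per bottom blob
      coeff: -1.0   # so the layer outputs branch_a - branch_b
    }
  }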

// Message that stores parameters used by ELULayer
message ELUParameter {
  // Described in:
  // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate
  // Deep Network Learning by Exponential Linear Units (ELUs). arXiv
  optional float alpha = 1 [default = 1];
}

// Message that stores parameters used by EmbedLayer
message EmbedParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  // The input is given as integers to be interpreted as one-hot
  // vector indices with dimension input_dim.  Hence input_dim should be
  // 1 greater than the maximum possible input value.
  optional uint32 input_dim = 2;

  optional bool bias_term = 3 [default = true]; // Whether to use a bias term
  optional FillerParameter weight_filler = 4; // The filler for the weight
  optional FillerParameter bias_filler = 5; // The filler for the bias

}

// Message that stores parameters used by ExpLayer
message ExpParameter {
  // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = exp(shift + scale * x).
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

/// Message that stores parameters used by FlattenLayer
message FlattenParameter {
  // The first axis to flatten: all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 1 [default = 1];
  
  // The last axis to flatten: all following axes are retained in the output.
  // May be negative to index from the end (e.g., the default -1 for the last
  // axis).
  optional int32 end_axis = 2 [default = -1];
}

// Message that stores parameters used by HDF5DataLayer
message HDF5DataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 2;

  // Specify whether to shuffle the data.
  // If shuffle == true, the ordering of the HDF5 files is shuffled,
  // and the ordering of data within any given HDF5 file is shuffled,
  // but data between different files are not interleaved; all of a file's
  // data are output (in a random order) before moving onto another file.
  optional bool shuffle = 3 [default = false];
}
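
In an HDF5Data layer, source points not to the data itself but to a text file listing one HDF5 file path per line; a sketch with an illustrative list file:

  layer {
    name: "hdf5_data"
    type: "HDF5Data"
    top: "data"
    top: "label"
    hdf5_data_param {
      source: "train_h5_list.txt"  # illustrative; one .h5 path per line
      batch_size: 32
      shuffle: true
    }
  }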

message HDF5OutputParameter {
  optional string file_name = 1;
}

message HingeLossParameter {
  enum Norm {
    L1 = 1;
    L2 = 2;
  }
  // Specify the Norm to use L1 or L2
  optional Norm norm = 1 [default = L1];
}

message ImageDataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4 [default = 1];
  // The rand_skip variable is for the data layer to skip a few data points
  // to avoid all asynchronous sgd clients to start at the same point. The skip
  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
  // be larger than the number of keys in the database.
  optional uint32 rand_skip = 7 [default = 0];
  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  optional bool shuffle = 8 [default = false];
  // It will also resize images if new_height or new_width are not zero.
  optional uint32 new_height = 9 [default = 0];
  optional uint32 new_width = 10 [default = 0];
  // Specify if the images are color or gray
  optional bool is_color = 11 [default = true];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  optional string root_folder = 12 [default = ""];
}
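
An ImageData layer reads a text file in which each line holds an image path and an integer label; a sketch with illustrative paths:

  layer {
    name: "image_data"
    type: "ImageData"
    top: "data"
    top: "label"
    image_data_param {
      source: "train_list.txt"   # illustrative; each line: <image path> <label>
      root_folder: "/data/images/"
      batch_size: 32
      new_height: 256            # non-zero, so images are resized to 256 x 256
      new_width: 256
      shuffle: true
    }
  }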

message InfogainLossParameter {
  // Specify the infogain matrix source.
  optional string source = 1;
}

message InnerProductParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 3; // The filler for the weight
  optional FillerParameter bias_filler = 4; // The filler for the bias

  // The first axis to be lumped into a single inner product computation;
  // all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 5 [default = 1];
}
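
A typical fully connected layer, sketched with illustrative names and fillers:

  layer {
    name: "fc6"
    type: "InnerProduct"
    bottom: "pool5"
    top: "fc6"
    inner_product_param {
      num_output: 4096
      weight_filler { type: "xavier" }
      bias_filler { type: "constant" value: 0 }
    }
  }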

// Message that stores parameters used by LogLayer
message LogParameter {
  // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = ln(shift + scale * x) = log_e(shift + scale * x)
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

// Message that stores parameters used by LRNLayer
message LRNParameter {
  optional uint32 local_size = 1 [default = 5];
  optional float alpha = 2 [default = 1.];
  optional float beta = 3 [default = 0.75];
  enum NormRegion {
    ACROSS_CHANNELS = 0;
    WITHIN_CHANNEL = 1;
  }
  optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS];
  optional float k = 5 [default = 1.];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}

message MemoryDataParameter {
  optional uint32 batch_size = 1;
  optional uint32 channels = 2;
  optional uint32 height = 3;
  optional uint32 width = 4;
}

message MVNParameter {
  // This parameter can be set to false to normalize mean only
  optional bool normalize_variance = 1 [default = true];

  // This parameter can be set to true to perform DNN-like MVN
  optional bool across_channels = 2 [default = false];

  // Epsilon for not dividing by zero while normalizing variance
  optional float eps = 3 [default = 1e-9];
}

message PoolingParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 1 [default = MAX]; // The pooling method
  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in height and width or as Y, X pairs.
  optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X)
  optional uint32 pad_h = 9 [default = 0]; // The padding height
  optional uint32 pad_w = 10 [default = 0]; // The padding width
  optional uint32 kernel_size = 2; // The kernel size (square)
  optional uint32 kernel_h = 5; // The kernel height
  optional uint32 kernel_w = 6; // The kernel width
  optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X)
  optional uint32 stride_h = 7; // The stride height
  optional uint32 stride_w = 8; // The stride width
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 11 [default = DEFAULT];
  // If global_pooling then it will pool over the size of the bottom by doing
  // kernel_h = bottom->height and kernel_w = bottom->width
  optional bool global_pooling = 12 [default = false];
}
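
A max-pooling sketch with illustrative names; note that with global_pooling the kernel size is derived from the bottom blob, so kernel_size must not be set explicitly:

  layer {
    name: "pool1"
    type: "Pooling"
    bottom: "conv1"
    top: "pool1"
    pooling_param {
      pool: MAX
      kernel_size: 3
      stride: 2
    }
  }

  # Global average pooling variant: the kernel spans the whole feature map.
  # pooling_param { pool: AVE global_pooling: true }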

message PowerParameter {
  // PowerLayer computes outputs y = (shift + scale * x) ^ power.
  optional float power = 1 [default = 1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

message PythonParameter {
  optional string module = 1;
  optional string layer = 2;
  // This value is set to the attribute `param_str` of the `PythonLayer` object
  // in Python before calling the `setup()` method. This could be a number,
  // string, dictionary in Python dict format, JSON, etc. You may parse this
  // string in `setup` method and use it in `forward` and `backward`.
  optional string param_str = 3 [default = ''];
  // Whether this PythonLayer is shared among worker solvers during data parallelism.
  // If true, each worker solver sequentially runs forward from this layer.
  // This value should be set true if you are using it as a data layer.
  optional bool share_in_parallel = 4 [default = false];
}
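
A sketch of a Python data layer (module name, class name, and param_str contents are all hypothetical); the module must be importable, i.e. on PYTHONPATH, and the class derives from caffe.Layer:

  layer {
    name: "py_data"
    type: "Python"
    top: "data"
    top: "label"
    python_param {
      module: "my_data_layer"          # hypothetical Python module
      layer: "MyDataLayer"             # hypothetical class in that module
      param_str: "{'batch_size': 16}"  # parsed by the layer's setup()
    }
  }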

// Message that stores parameters used by ReductionLayer
message ReductionParameter {
  enum ReductionOp {
    SUM = 1;
    ASUM = 2;
    SUMSQ = 3;
    MEAN = 4;
  }

  optional ReductionOp operation = 1 [default = SUM]; // reduction operation

  // The first axis to reduce to a scalar -- may be negative to index from the
  // end (e.g., -1 for the last axis).
  // (Currently, only reduction along ALL "tail" axes is supported; reduction
  // of axis M through N, where N < num_axes - 1, is unsupported.)
  // Suppose we have an n-axis bottom Blob with shape:
  //     (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)).
  // If axis == m, the output Blob will have shape
  //     (d0, d1, d2, ..., d(m-1)),
  // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1))
  // times, each including (dm * d(m+1) * ... * d(n-1)) individual data.
  // If axis == 0 (the default), the output Blob always has the empty shape
  // (count 1), performing reduction across the entire input --
  // often useful for creating new loss functions.
  optional int32 axis = 2 [default = 0];

  optional float coeff = 3 [default = 1.0]; // coefficient for output
}
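
For instance, reducing an entire blob to a scalar (axis == 0, the default) with a scaling coefficient; names here are illustrative:

  layer {
    name: "sum_all"
    type: "Reduction"
    bottom: "fc_out"
    top: "fc_sum"
    reduction_param {
      operation: SUM
      axis: 0      # reduce across the entire input -> scalar (shape count 1)
      coeff: 0.5   # output is multiplied by this coefficient
    }
  }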

// Message that stores parameters used by ReLULayer
message ReLUParameter {
  // Allow non-zero slope for negative inputs to speed up optimization
  // Described in:
  // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities
  // improve neural network acoustic models. In ICML Workshop on Deep Learning
  // for Audio, Speech, and Language Processing.
  optional float negative_slope = 1 [default = 0];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 2 [default = DEFAULT];
}

message ReshapeParameter {
  // Specify the output dimensions. If some of the dimensions are set to 0,
  // the corresponding dimension from the bottom layer is used (unchanged).
  // Exactly one dimension may be set to -1, in which case its value is
  // inferred from the count of the bottom blob and the remaining dimensions.
  // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8:
  //   layer {
  //     type: "Reshape" bottom: "input" top: "output"
  //     reshape_param { ... }
  //   }
  //
  // If "input" is 2D with shape 2 x 8, then the following reshape_param
  // specifications are all equivalent, producing a 3D blob "output" with shape
  // 2 x 2 x 4:
  //
  //   reshape_param { shape { dim:  2  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim: -1 } }
  //   reshape_param { shape { dim: -1  dim: 0  dim:  2 } }
  //
  optional BlobShape shape = 1;

  // axis and num_axes control the portion of the bottom blob's shape that are
  // replaced by (included in) the reshape. By default (axis == 0 and
  // num_axes == -1), the entire bottom blob shape is included in the reshape,
  // and hence the shape field must specify the entire output shape.
  // axis may be non-zero to retain some portion of the beginning of the input
  // shape (and may be negative to index from the end; e.g., -1 to begin the
  // reshape after the last axis, including nothing in the reshape,
  // -2 to include only the last axis, etc.).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are all equivalent,
  // producing a blob "output" with shape 2 x 2 x 4:
  //
  //   reshape_param { shape { dim: 2  dim: 2  dim: 4 } }
  //   reshape_param { shape { dim: 2  dim: 4 } axis:  1 }
  //   reshape_param { shape { dim: 2  dim: 4 } axis: -3 }
  //
  // num_axes specifies the extent of the reshape.
  // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on
  // input axes in the range [axis, axis+num_axes].
  // num_axes may also be -1, the default, to include all remaining axes
  // (starting from axis).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are equivalent,
  // producing a blob "output" with shape 1 x 2 x 8.
  //
  //   reshape_param { shape { dim:  1  dim: 2  dim:  8 } }
  //   reshape_param { shape { dim:  1  dim: 2  }  num_axes: 1 }
  //   reshape_param { shape { dim:  1  }  num_axes: 0 }
  //
  // On the other hand, these would produce output blob shape 2 x 1 x 8:
  //
  //   reshape_param { shape { dim: 2  dim: 1  dim: 8  }  }
  //   reshape_param { shape { dim: 1 }  axis: 1  num_axes: 0 }
  //
  optional int32 axis = 2 [default = 0];
  optional int32 num_axes = 3 [default = -1];
}

// Message that stores parameters used by ROIPoolingLayer
message ROIPoolingParameter {
  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in height and width or as Y, X pairs.
  optional uint32 pooled_h = 1 [default = 0]; // The pooled output height
  optional uint32 pooled_w = 2 [default = 0]; // The pooled output width
  // Multiplicative spatial scale factor to translate ROI coords from their
  // input scale to the scale used when pooling
  optional float spatial_scale = 3 [default = 1];
}

message ScaleParameter {
  // The first axis of bottom[0] (the first input Blob) along which to apply
  // bottom[1] (the second input Blob).  May be negative to index from the end
  // (e.g., -1 for the last axis).
  //
  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
  // top[0] will have the same shape, and bottom[1] may have any of the
  // following shapes (for the given value of axis):
  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
  //    (axis == 1 == -3)          3;     3x40;     3x40x60
  //    (axis == 2 == -2)                   40;       40x60
  //    (axis == 3 == -1)                                60
  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
  // "axis") -- a scalar multiplier.
  optional int32 axis = 1 [default = 1];

  // (num_axes is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer.  Otherwise, num_axes is determined by the
  // number of axes by the second bottom.)
  // The number of axes of the input (bottom[0]) covered by the scale
  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
  // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar.
  optional int32 num_axes = 2 [default = 1];

  // (filler is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer.)
  // The initialization for the learned scale parameter.
  // Default is the unit (1) initialization, resulting in the ScaleLayer
  // initially performing the identity operation.
  optional FillerParameter filler = 3;

  // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but
  // may be more efficient).  Initialized with bias_filler (defaults to 0).
  optional bool bias_term = 4 [default = false];
  optional FillerParameter bias_filler = 5;
}
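
A common use is the BatchNorm + Scale idiom, where a Scale layer learns a per-channel multiplier and bias after a BatchNorm layer; a sketch with illustrative names:

  layer {
    name: "scale1"
    type: "Scale"
    bottom: "bn1"
    top: "bn1"    # in-place
    scale_param {
      bias_term: true                       # also learn a per-channel bias
      filler { type: "constant" value: 1 }  # start as the identity scale
      bias_filler { type: "constant" value: 0 }
    }
  }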

message SigmoidParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}

message SmoothL1LossParameter {
  // SmoothL1Loss(x) =
  //   0.5 * (sigma * x) ** 2    -- if |x| < 1.0 / sigma / sigma
  //   |x| - 0.5 / sigma / sigma -- otherwise
  optional float sigma = 1 [default = 1];
}

message SliceParameter {
  // The axis along which to slice -- may be negative to index from the end
  // (e.g., -1 for the last axis).
  // By default, SliceLayer slices blobs along the "channels" axis (1).
  optional int32 axis = 3 [default = 1];
  repeated uint32 slice_point = 2;

  // DEPRECATED: alias for "axis" -- does not support negative indexing.
  optional uint32 slice_dim = 1 [default = 1];
}
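
With N slice_point values the layer produces N+1 tops; a sketch splitting a hypothetical 8-channel blob into channels [0,3) and [3,8):

  layer {
    name: "slice1"
    type: "Slice"
    bottom: "data"
    top: "part1"
    top: "part2"
    slice_param {
      axis: 1          # slice along channels
      slice_point: 3   # part1 gets channels 0-2, part2 gets the rest
    }
  }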

// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer
message SoftmaxParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];

  // The axis along which to perform the softmax -- may be negative to index
  // from the end (e.g., -1 for the last axis).
  // Any other axes will be evaluated as independent softmaxes.
  optional int32 axis = 2 [default = 1];
}

message TanHParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}

// Message that stores parameters used by TileLayer
message TileParameter {
  // The index of the axis to tile.
  optional int32 axis = 1 [default = 1];

  // The number of copies (tiles) of the blob to output.
  optional int32 tiles = 2;
}

// Message that stores parameters used by ThresholdLayer
message ThresholdParameter {
  optional float threshold = 1 [default = 0]; // Strictly positive values
}

message WindowDataParameter {
  // Specify the data source.
  optional string source = 1;
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 6 [default = false];
  // Foreground (object) overlap threshold
  optional float fg_threshold = 7 [default = 0.5];
  // Background (non-object) overlap threshold
  optional float bg_threshold = 8 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float fg_fraction = 9 [default = 0.25];
  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 context_pad = 10 [default = 0];
  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string crop_mode = 11 [default = "warp"];
  // cache_images: will load all images in memory for faster access
  optional bool cache_images = 12 [default = false];
  // append root_folder to locate images
  optional string root_folder = 13 [default = ""];
}

message SPPParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional uint32 pyramid_height = 1;
  optional PoolMethod pool = 2 [default = MAX]; // The pooling method
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}

// DEPRECATED: use LayerParameter.
message V1LayerParameter {
  repeated string bottom = 2;
  repeated string top = 3;
  optional string name = 4;
  repeated NetStateRule include = 32;
  repeated NetStateRule exclude = 33;
  enum LayerType {
    NONE = 0;
    ABSVAL = 35;
    ACCURACY = 1;
    ARGMAX = 30;
    BNLL = 2;
    CONCAT = 3;
    CONTRASTIVE_LOSS = 37;
    CONVOLUTION = 4;
    DATA = 5;
    DECONVOLUTION = 39;
    DROPOUT = 6;
    DUMMY_DATA = 32;
    EUCLIDEAN_LOSS = 7;
    ELTWISE = 25;
    EXP = 38;
    FLATTEN = 8;
    HDF5_DATA = 9;
    HDF5_OUTPUT = 10;
    HINGE_LOSS = 28;
    IM2COL = 11;
    IMAGE_DATA = 12;
    INFOGAIN_LOSS = 13;
    INNER_PRODUCT = 14;
    LRN = 15;
    MEMORY_DATA = 29;
    MULTINOMIAL_LOGISTIC_LOSS = 16;
    MVN = 34;
    POOLING = 17;
    POWER = 26;
    RELU = 18;
    SIGMOID = 19;
    SIGMOID_CROSS_ENTROPY_LOSS = 27;
    SILENCE = 36;
    SOFTMAX = 20;
    SOFTMAX_LOSS = 21;
    SPLIT = 22;
    SLICE = 33;
    TANH = 23;
    WINDOW_DATA = 24;
    THRESHOLD = 31;
  }
  optional LayerType type = 5;
  repeated BlobProto blobs = 6;
  repeated string param = 1001;
  repeated DimCheckMode blob_share_mode = 1002;
  enum DimCheckMode {
    STRICT = 0;
    PERMISSIVE = 1;
  }
  repeated float blobs_lr = 7;
  repeated float weight_decay = 8;
  repeated float loss_weight = 35;
  optional AccuracyParameter accuracy_param = 27;
  optional ArgMaxParameter argmax_param = 23;
  optional ConcatParameter concat_param = 9;
  optional ContrastiveLossParameter contrastive_loss_param = 40;
  optional ConvolutionParameter convolution_param = 10;
  optional DataParameter data_param = 11;
  optional DropoutParameter dropout_param = 12;
  optional DummyDataParameter dummy_data_param = 26;
  optional EltwiseParameter eltwise_param = 24;
  optional ExpParameter exp_param = 41;
  optional HDF5DataParameter hdf5_data_param = 13;
  optional HDF5OutputParameter hdf5_output_param = 14;
  optional HingeLossParameter hinge_loss_param = 29;
  optional ImageDataParameter image_data_param = 15;
  optional InfogainLossParameter infogain_loss_param = 16;
  optional InnerProductParameter inner_product_param = 17;
  optional LRNParameter lrn_param = 18;
  optional MemoryDataParameter memory_data_param = 22;
  optional MVNParameter mvn_param = 34;
  optional PoolingParameter pooling_param = 19;
  optional PowerParameter power_param = 21;
  optional ReLUParameter relu_param = 30;
  optional SigmoidParameter sigmoid_param = 38;
  optional SoftmaxParameter softmax_param = 39;
  optional SliceParameter slice_param = 31;
  optional TanHParameter tanh_param = 37;
  optional ThresholdParameter threshold_param = 25;
  optional WindowDataParameter window_data_param = 20;
  optional TransformationParameter transform_param = 36;
  optional LossParameter loss_param = 42;
  optional V0LayerParameter layer = 1;
}

// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters
// in Caffe.  We keep this message type around for legacy support.
message V0LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the string to specify the layer type

  // Parameters to specify layers with inner products.
  optional uint32 num_output = 3; // The number of outputs for the layer
  optional bool biasterm = 4 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 5; // The filler for the weight
  optional FillerParameter bias_filler = 6; // The filler for the bias

  optional uint32 pad = 7 [default = 0]; // The padding size
  optional uint32 kernelsize = 8; // The kernel size
  optional uint32 group = 9 [default = 1]; // The group size for group conv
  optional uint32 stride = 10 [default = 1]; // The stride
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 11 [default = MAX]; // The pooling method
  optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio

  optional uint32 local_size = 13 [default = 5]; // for local response norm
  optional float alpha = 14 [default = 1.]; // for local response norm
  optional float beta = 15 [default = 0.75]; // for local response norm
  optional float k = 22 [default = 1.];

  // For data layers, specify the data source
  optional string source = 16;
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 17 [default = 1];
  optional string meanfile = 18;
  // For data layers, specify the batch size.
  optional uint32 batchsize = 19;
  // For data layers, specify if we would like to randomly crop an image.
  optional uint32 cropsize = 20 [default = 0];
  // For data layers, specify if we want to randomly mirror data.
  optional bool mirror = 21 [default = false];

  // The blobs containing the numeric parameters of the layer
  repeated BlobProto blobs = 50;
  // The ratio that is multiplied on the global learning rate. If you want to
  // set the learning ratio for one blob, you need to set it for all blobs.
  repeated float blobs_lr = 51;
  // The weight decay that is multiplied on the global weight decay.
  repeated float weight_decay = 52;

  // The rand_skip variable is for the data layer to skip a few data points
  // to avoid all asynchronous sgd clients to start at the same point. The skip
  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
  // be larger than the number of keys in the database.
  optional uint32 rand_skip = 53 [default = 0];

  // Fields related to detection (det_*)
  // foreground (object) overlap threshold
  optional float det_fg_threshold = 54 [default = 0.5];
  // background (non-object) overlap threshold
  optional float det_bg_threshold = 55 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float det_fg_fraction = 56 [default = 0.25];

  // optional bool OBSOLETE_can_clobber = 57 [default = true];

  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 det_context_pad = 58 [default = 0];

  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string det_crop_mode = 59 [default = "warp"];

  // For ReshapeLayer, one needs to specify the new dimensions.
  optional int32 new_num = 60 [default = 0];
  optional int32 new_channels = 61 [default = 0];
  optional int32 new_height = 62 [default = 0];
  optional int32 new_width = 63 [default = 0];

  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  // It will also resize images if new_height or new_width are not zero.
  optional bool shuffle_images = 64 [default = false];

  // For ConcatLayer, one needs to specify the dimension for concatenation, and
  // the other dimensions must be the same for all the bottom blobs.
  // By default it will concatenate blobs along the channels dimension.
  optional uint32 concat_dim = 65 [default = 1];

  optional HDF5OutputParameter hdf5_output_param = 1001;
}

message PReLUParameter {
  // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers:
  // Surpassing Human-Level Performance on ImageNet Classification, 2015.
   
  // Initial value of a_i. Default is a_i=0.25 for all i.
  optional FillerParameter filler = 1;
  // Whether or not slope parameters are shared across channels.
  optional bool channel_shared = 2 [default = false];

}
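
A PReLU sketch with illustrative names, initializing every slope a_i to 0.25 and learning one slope per channel:

  layer {
    name: "prelu1"
    type: "PReLU"
    bottom: "conv1"
    top: "conv1"    # in-place
    prelu_param {
      filler { type: "constant" value: 0.25 }  # initial a_i
      channel_shared: false                    # one learned slope per channel
    }
  }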


Given my limited ability, these code comments are bound to contain errors and omissions; corrections are welcome, and I will keep revising this post...
