Caffe layer parameter settings (configured in prototxt files)

These parameters are defined in include/caffe/proto/caffe.proto.

To look up the parameters of a particular layer type, expand this article's table of contents and jump to the corresponding entry.

syntax= "proto2";


packagecaffe;


//Specifies the shape (dimensions) of a Blob.

messageBlobShape {

repeatedint64 dim = 1 [packed = true];

}


message BlobProto {
  optional BlobShape shape = 7;
  repeated float data = 5 [packed = true];
  repeated float diff = 6 [packed = true];
  repeated double double_data = 8 [packed = true];
  repeated double double_diff = 9 [packed = true];

  // 4D dimensions -- deprecated. Use "shape" instead.
  optional int32 num = 1 [default = 0];
  optional int32 channels = 2 [default = 0];
  optional int32 height = 3 [default = 0];
  optional int32 width = 4 [default = 0];
}


// The BlobProtoVector is simply a way to pass multiple BlobProto instances
// around.
message BlobProtoVector {
  repeated BlobProto blobs = 1;
}


message Datum {
  optional int32 channels = 1;
  optional int32 height = 2;
  optional int32 width = 3;
  // the actual image data, in bytes
  optional bytes data = 4;
  optional int32 label = 5;
  // Optionally, the datum could also hold float data.
  repeated float float_data = 6;
  // If true, data contains an encoded image that needs to be decoded
  optional bool encoded = 7 [default = false];
}


message FillerParameter {
  // The filler type.
  optional string type = 1 [default = 'constant'];
  optional float value = 2 [default = 0]; // the value in constant filler
  optional float min = 3 [default = 0]; // the min value in uniform filler
  optional float max = 4 [default = 1]; // the max value in uniform filler
  optional float mean = 5 [default = 0]; // the mean value in Gaussian filler
  optional float std = 6 [default = 1]; // the std value in Gaussian filler
  // The expected number of non-zero output weights for a given input in
  // Gaussian filler -- the default -1 means don't perform sparsification.
  optional int32 sparse = 7 [default = -1];
  // Normalize the filler variance by fan_in, fan_out, or their average.
  // Applies to 'xavier' and 'msra' fillers.
  enum VarianceNorm {
    FAN_IN = 0;
    FAN_OUT = 1;
    AVERAGE = 2;
  }
  optional VarianceNorm variance_norm = 8 [default = FAN_IN];
}
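
For illustration, a filler is configured inside a layer's type-specific parameter block (e.g., convolution_param or inner_product_param); this sketch uses made-up values, not defaults:

  weight_filler {
    type: "gaussian"   # or "constant", "uniform", "xavier", "msra", ...
    std: 0.01
  }
  bias_filler {
    type: "constant"
    value: 0
  }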


message NetParameter {
  optional string name = 1; // consider giving the network a name
  // DEPRECATED. See InputParameter. The input blobs to the network.
  repeated string input = 3;
  // DEPRECATED. See InputParameter. The shape of the input blobs.
  repeated BlobShape input_shape = 8;

  // 4D input dimensions -- deprecated. Use "input_shape" instead.
  // If specified, for each input blob there should be four
  // values specifying the num, channels, height and width of the input blob.
  // Thus, there should be a total of (4 * #input) numbers.
  repeated int32 input_dim = 4;

  // Whether the network will force every layer to carry out backward operation.
  // If set False, then whether to carry out backward is determined
  // automatically according to the net structure and learning rates.
  optional bool force_backward = 5 [default = false];
  // The current "state" of the network, including the phase, level, and stage.
  // Some layers may be included/excluded depending on this state and the states
  // specified in the layers' include and exclude fields.
  optional NetState state = 6;

  // Print debugging information about results while running Net::Forward,
  // Net::Backward, and Net::Update.
  optional bool debug_info = 7 [default = false];

  // The layers that make up the net. Each of their configurations, including
  // connectivity and behavior, is specified as a LayerParameter.
  repeated LayerParameter layer = 100; // ID 100 so layers are printed last.

  // DEPRECATED: use 'layer' instead.
  repeated V1LayerParameter layers = 2;
}
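
A minimal net prototxt built from these fields might look as follows (a sketch; the net and layer names are arbitrary):

name: "ExampleNet"
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape { dim: 1 dim: 3 dim: 224 dim: 224 } }
}
layer {
  name: "fc1"
  type: "InnerProduct"
  bottom: "data"
  top: "fc1"
  inner_product_param { num_output: 10 }
}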


// NOTE
// Update the next available ID when you add a new SolverParameter field.
//
// SolverParameter next available ID: 42 (last added: layer_wise_reduce)
message SolverParameter {
  //
  // Specifying the train and test networks
  //
  // Exactly one train net must be specified using one of the following fields:
  //     train_net_param, train_net, net_param, net
  // One or more test nets may be specified using any of the following fields:
  //     test_net_param, test_net, net_param, net
  // If more than one test net field is specified (e.g., both net and
  // test_net are specified), they will be evaluated in the field order given
  // above: (1) test_net_param, (2) test_net, (3) net_param/net.
  // A test_iter must be specified for each test_net.
  // A test_level and/or a test_stage may also be specified for each test_net.
  //

  // Proto filename for the train net, possibly combined with one or more
  // test nets.
  optional string net = 24;
  // Inline train net param, possibly combined with one or more test nets.
  optional NetParameter net_param = 25;

  optional string train_net = 1; // Proto filename for the train net.
  repeated string test_net = 2; // Proto filenames for the test nets.
  optional NetParameter train_net_param = 21; // Inline train net params.
  repeated NetParameter test_net_param = 22; // Inline test net params.

  // The states for the train/test nets. Must be unspecified or
  // specified once per net.
  //
  // By default, train_state will have phase = TRAIN,
  // and all test_state's will have phase = TEST.
  // Other defaults are set according to the NetState defaults.
  optional NetState train_state = 26;
  repeated NetState test_state = 27;

  // The number of iterations for each test net.
  repeated int32 test_iter = 3;

  // The number of iterations between two testing phases.
  optional int32 test_interval = 4 [default = 0];
  optional bool test_compute_loss = 19 [default = false];
  // If true, run an initial test pass before the first iteration,
  // ensuring memory availability and printing the starting value of the loss.
  optional bool test_initialization = 32 [default = true];
  optional float base_lr = 5; // The base learning rate
  // the number of iterations between displaying info. If display = 0, no info
  // will be displayed.
  optional int32 display = 6;
  // Display the loss averaged over the last average_loss iterations
  optional int32 average_loss = 33 [default = 1];
  optional int32 max_iter = 7; // the maximum number of iterations
  // accumulate gradients over `iter_size` x `batch_size` instances
  optional int32 iter_size = 36 [default = 1];

  // The learning rate decay policy. The currently implemented learning rate
  // policies are as follows:
  //    - fixed: always return base_lr.
  //    - step: return base_lr * gamma ^ (floor(iter / step))
  //    - exp: return base_lr * gamma ^ iter
  //    - inv: return base_lr * (1 + gamma * iter) ^ (- power)
  //    - multistep: similar to step but it allows non uniform steps defined by
  //      stepvalue
  //    - poly: the effective learning rate follows a polynomial decay, to be
  //      zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power)
  //    - sigmoid: the effective learning rate follows a sigmoid decay
  //      return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize))))
  //
  // where base_lr, max_iter, gamma, step, stepvalue and power are defined
  // in the solver parameter protocol buffer, and iter is the current iteration.
  optional string lr_policy = 8;
  optional float gamma = 9; // The parameter to compute the learning rate.
  optional float power = 10; // The parameter to compute the learning rate.
  optional float momentum = 11; // The momentum value.
  optional float weight_decay = 12; // The weight decay.
  // regularization types supported: L1 and L2
  // controlled by weight_decay
  optional string regularization_type = 29 [default = "L2"];
  // the stepsize for learning rate policy "step"
  optional int32 stepsize = 13;
  // the stepsize for learning rate policy "multistep"
  repeated int32 stepvalue = 34;

  // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm,
  // whenever their actual L2 norm is larger.
  optional float clip_gradients = 35 [default = -1];

  optional int32 snapshot = 14 [default = 0]; // The snapshot interval
  optional string snapshot_prefix = 15; // The prefix for the snapshot.
  // whether to snapshot diff in the results or not. Snapshotting diff will help
  // debugging but the final protocol buffer size will be much larger.
  optional bool snapshot_diff = 16 [default = false];
  enum SnapshotFormat {
    HDF5 = 0;
    BINARYPROTO = 1;
  }
  optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO];
  // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default.
  enum SolverMode {
    CPU = 0;
    GPU = 1;
  }
  optional SolverMode solver_mode = 17 [default = GPU];
  // the device_id that will be used in GPU mode. Use device_id = 0 in default.
  optional int32 device_id = 18 [default = 0];
  // If non-negative, the seed with which the Solver will initialize the Caffe
  // random number generator -- useful for reproducible results. Otherwise,
  // (and by default) initialize using a seed derived from the system clock.
  optional int64 random_seed = 20 [default = -1];

  // type of the solver
  optional string type = 40 [default = "SGD"];

  // numerical stability for RMSProp, AdaGrad, AdaDelta and Adam
  optional float delta = 31 [default = 1e-8];
  // parameters for the Adam solver
  optional float momentum2 = 39 [default = 0.999];

  // RMSProp decay value
  // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t)
  optional float rms_decay = 38 [default = 0.99];

  // If true, print information about the state of the net that may help with
  // debugging learning problems.
  optional bool debug_info = 23 [default = false];

  // If false, don't save a snapshot after training finishes.
  optional bool snapshot_after_train = 28 [default = true];

  // DEPRECATED: old solver enum types, use string instead
  enum SolverType {
    SGD = 0;
    NESTEROV = 1;
    ADAGRAD = 2;
    RMSPROP = 3;
    ADADELTA = 4;
    ADAM = 5;
  }
  // DEPRECATED: use type instead of solver_type
  optional SolverType solver_type = 30 [default = SGD];

  // Overlap compute and communication for data parallel training
  optional bool layer_wise_reduce = 41 [default = true];
}
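
Putting the common fields together, a typical solver.prototxt might read as follows (all paths and values are illustrative, not recommendations):

net: "models/example/train_val.prototxt"
test_iter: 100
test_interval: 1000
base_lr: 0.01
lr_policy: "step"
gamma: 0.1
stepsize: 10000
momentum: 0.9
weight_decay: 0.0005
display: 100
max_iter: 45000
snapshot: 5000
snapshot_prefix: "models/example/example"
solver_mode: GPU
type: "SGD"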


// A message that stores the solver snapshots
message SolverState {
  optional int32 iter = 1; // The current iteration
  optional string learned_net = 2; // The file that stores the learned net.
  repeated BlobProto history = 3; // The history for sgd solvers
  optional int32 current_step = 4 [default = 0]; // The current step for learning rate
}


enum Phase {
  TRAIN = 0;
  TEST = 1;
}

message NetState {
  optional Phase phase = 1 [default = TEST];
  optional int32 level = 2 [default = 0];
  repeated string stage = 3;
}


message NetStateRule {
  // Set phase to require the NetState have a particular phase (TRAIN or TEST)
  // to meet this rule.
  optional Phase phase = 1;

  // Set the minimum and/or maximum levels in which the layer should be used.
  // Leave undefined to meet the rule regardless of level.
  optional int32 min_level = 2;
  optional int32 max_level = 3;

  // Customizable sets of stages to include or exclude.
  // The net must have ALL of the specified stages and NONE of the specified
  // "not_stage"s to meet the rule.
  // (Use multiple NetStateRules to specify conjunctions of stages.)
  repeated string stage = 4;
  repeated string not_stage = 5;
}


// Specifies training parameters (multipliers on global learning constants,
// and the name and other settings used for weight sharing).
message ParamSpec {
  // The names of the parameter blobs -- useful for sharing parameters among
  // layers, but never required otherwise. To share a parameter between two
  // layers, give it a (non-empty) name.
  optional string name = 1;

  // Whether to require shared weights to have the same shape, or just the same
  // count -- defaults to STRICT if unspecified.
  optional DimCheckMode share_mode = 2;
  enum DimCheckMode {
    // STRICT (default) requires that num, channels, height, width each match.
    STRICT = 0;
    // PERMISSIVE requires only the count (num*channels*height*width) to match.
    PERMISSIVE = 1;
  }

  // The multiplier on the global learning rate for this parameter.
  optional float lr_mult = 3 [default = 1.0];

  // The multiplier on the global weight decay for this parameter.
  optional float decay_mult = 4 [default = 1.0];
}
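
In a layer definition, ParamSpec appears as repeated param blocks, one per parameter blob; a common (illustrative) pattern gives the bias a doubled learning rate and no weight decay:

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param { lr_mult: 1 decay_mult: 1 }  # weights
  param { lr_mult: 2 decay_mult: 0 }  # bias
  convolution_param { num_output: 32 kernel_size: 3 }
}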


// NOTE
// Update the next available ID when you add a new LayerParameter field.
//
// LayerParameter next available layer-specific ID: 147 (last added: recurrent_param)
message LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the layer type
  repeated string bottom = 3; // the name of each bottom blob
  repeated string top = 4; // the name of each top blob

  // The train / test phase for computation.
  optional Phase phase = 10;

  // The amount of weight to assign each top blob in the objective.
  // Each layer assigns a default value, usually of either 0 or 1,
  // to each top blob.
  repeated float loss_weight = 5;

  // Specifies training parameters (multipliers on global learning constants,
  // and the name and other settings used for weight sharing).
  repeated ParamSpec param = 6;

  // The blobs containing the numeric parameters of the layer.
  repeated BlobProto blobs = 7;

  // Specifies whether to backpropagate to each bottom. If unspecified,
  // Caffe will automatically infer whether each input needs backpropagation
  // to compute parameter gradients. If set to true for some inputs,
  // backpropagation to those inputs is forced; if set false for some inputs,
  // backpropagation to those inputs is skipped.
  //
  // The size must be either 0 or equal to the number of bottoms.
  repeated bool propagate_down = 11;

  // Rules controlling whether and when a layer is included in the network,
  // based on the current NetState. You may specify a non-zero number of rules
  // to include OR exclude, but not both. If no include or exclude rules are
  // specified, the layer is always included. If the current NetState meets
  // ANY (i.e., one or more) of the specified rules, the layer is
  // included/excluded.
  repeated NetStateRule include = 8;
  repeated NetStateRule exclude = 9;

  // Parameters for data pre-processing.
  optional TransformationParameter transform_param = 100;

  // Parameters shared by loss layers.
  optional LossParameter loss_param = 101;

  // Layer type-specific parameters.
  //
  // Note: certain layers may have more than one computational engine
  // for their implementation. These layers include an Engine type and
  // engine parameter for selecting the implementation.
  // The default for the engine is set by the ENGINE switch at compile-time.
  optional AccuracyParameter accuracy_param = 102;
  optional ArgMaxParameter argmax_param = 103;
  optional BatchNormParameter batch_norm_param = 139;
  optional BiasParameter bias_param = 141;
  optional ConcatParameter concat_param = 104;
  optional ContrastiveLossParameter contrastive_loss_param = 105;
  optional ConvolutionParameter convolution_param = 106;
  optional CropParameter crop_param = 144;
  optional DataParameter data_param = 107;
  optional DropoutParameter dropout_param = 108;
  optional DummyDataParameter dummy_data_param = 109;
  optional EltwiseParameter eltwise_param = 110;
  optional ELUParameter elu_param = 140;
  optional EmbedParameter embed_param = 137;
  optional ExpParameter exp_param = 111;
  optional FlattenParameter flatten_param = 135;
  optional HDF5DataParameter hdf5_data_param = 112;
  optional HDF5OutputParameter hdf5_output_param = 113;
  optional HingeLossParameter hinge_loss_param = 114;
  optional ImageDataParameter image_data_param = 115;
  optional InfogainLossParameter infogain_loss_param = 116;
  optional InnerProductParameter inner_product_param = 117;
  optional InputParameter input_param = 143;
  optional LogParameter log_param = 134;
  optional LRNParameter lrn_param = 118;
  optional MemoryDataParameter memory_data_param = 119;
  optional MVNParameter mvn_param = 120;
  optional ParameterParameter parameter_param = 145;
  optional PoolingParameter pooling_param = 121;
  optional PowerParameter power_param = 122;
  optional PReLUParameter prelu_param = 131;
  optional PythonParameter python_param = 130;
  optional RecurrentParameter recurrent_param = 146;
  optional ReductionParameter reduction_param = 136;
  optional ReLUParameter relu_param = 123;
  optional ReshapeParameter reshape_param = 133;
  optional ScaleParameter scale_param = 142;
  optional SigmoidParameter sigmoid_param = 124;
  optional SoftmaxParameter softmax_param = 125;
  optional SPPParameter spp_param = 132;
  optional SliceParameter slice_param = 126;
  optional TanHParameter tanh_param = 127;
  optional ThresholdParameter threshold_param = 128;
  optional TileParameter tile_param = 138;
  optional WindowDataParameter window_data_param = 129;
}
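
As an example of the include rules above, train and test data layers commonly share a name and are selected by phase (a sketch; the LMDB paths are placeholders):

layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include { phase: TRAIN }
  data_param { source: "examples/train_lmdb" batch_size: 64 backend: LMDB }
}
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include { phase: TEST }
  data_param { source: "examples/test_lmdb" batch_size: 100 backend: LMDB }
}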


// Message that stores parameters used to apply transformation
// to the data layer's data
message TransformationParameter {
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 1 [default = 1];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 2 [default = false];
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 3 [default = 0];
  // mean_file and mean_value cannot be specified at the same time
  optional string mean_file = 4;
  // if specified can be repeated once (would subtract it from all the channels)
  // or can be repeated the same number of times as channels
  // (would subtract them from the corresponding channel)
  repeated float mean_value = 5;
  // Force the decoded image to have 3 color channels.
  optional bool force_color = 6 [default = false];
  // Force the decoded image to have 1 color channel.
  optional bool force_gray = 7 [default = false];
}
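
A typical transform_param inside a data layer (a sketch; the mean file path is a placeholder):

  transform_param {
    scale: 0.00390625    # 1/255, maps 8-bit pixel values into [0, 1]
    mirror: true
    crop_size: 227
    mean_file: "data/example/mean.binaryproto"
  }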


// Message that stores parameters shared by loss layers
message LossParameter {
  // If specified, ignore instances with the given label.
  optional int32 ignore_label = 1;
  // How to normalize the loss for loss layers that aggregate across batches,
  // spatial dimensions, or other dimensions. Currently only implemented in
  // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers.
  enum NormalizationMode {
    // Divide by the number of examples in the batch times spatial dimensions.
    // Outputs that receive the ignore label will NOT be ignored in computing
    // the normalization factor.
    FULL = 0;
    // Divide by the total number of output locations that do not take the
    // ignore_label. If ignore_label is not set, this behaves like FULL.
    VALID = 1;
    // Divide by the batch size.
    BATCH_SIZE = 2;
    // Do not normalize the loss.
    NONE = 3;
  }
  // For historical reasons, the default normalization for
  // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID.
  optional NormalizationMode normalization = 3 [default = VALID];
  // Deprecated. Ignored if normalization is specified. If normalization
  // is not specified, then setting this to false will be equivalent to
  // normalization = BATCH_SIZE to be consistent with previous behavior.
  optional bool normalize = 2;
}
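
For instance, a SoftmaxWithLoss layer that skips a "void" label and normalizes only over valid outputs (the label value 255 is just an example):

layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "score"
  bottom: "label"
  top: "loss"
  loss_param {
    ignore_label: 255
    normalization: VALID
  }
}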


// Messages that store parameters used by individual layer types follow, in
// alphabetical order.

message AccuracyParameter {
  // When computing accuracy, count as correct by comparing the true label to
  // the top k scoring classes. By default, only compare to the top scoring
  // class (i.e. argmax).
  optional uint32 top_k = 1 [default = 1];

  // The "label" axis of the prediction blob, whose argmax corresponds to the
  // predicted label -- may be negative to index from the end (e.g., -1 for the
  // last axis). For example, if axis == 1 and the predictions are
  // (N x C x H x W), the label blob is expected to contain N*H*W ground truth
  // labels with integer values in {0, 1, ..., C-1}.
  optional int32 axis = 2 [default = 1];

  // If specified, ignore instances with the given label.
  optional int32 ignore_label = 3;
}


message ArgMaxParameter {
  // If true produce pairs (argmax, maxval)
  optional bool out_max_val = 1 [default = false];
  optional uint32 top_k = 2 [default = 1];
  // The axis along which to maximise -- may be negative to index from the
  // end (e.g., -1 for the last axis).
  // By default ArgMaxLayer maximizes over the flattened trailing dimensions
  // for each index of the first / num dimension.
  optional int32 axis = 3;
}


message ConcatParameter {
  // The axis along which to concatenate -- may be negative to index from the
  // end (e.g., -1 for the last axis). Other axes must have the
  // same dimension for all the bottom blobs.
  // By default, ConcatLayer concatenates blobs along the "channels" axis (1).
  optional int32 axis = 2 [default = 1];

  // DEPRECATED: alias for "axis" -- does not support negative indexing.
  optional uint32 concat_dim = 1 [default = 1];
}


message BatchNormParameter {
  // If false, normalization is performed over the current mini-batch
  // and global statistics are accumulated (but not yet used) by a moving
  // average.
  // If true, those accumulated mean and variance values are used for the
  // normalization.
  // By default, it is set to false when the network is in the training
  // phase and true when the network is in the testing phase.
  optional bool use_global_stats = 1;
  // What fraction of the moving average remains each iteration?
  // Smaller values make the moving average decay faster, giving more
  // weight to the recent values.
  // Each iteration updates the moving average @f$S_{t-1}@f$ with the
  // current mean @f$ Y_t @f$ by
  // @f$ S_t = (1-\beta)Y_t + \beta \cdot S_{t-1} @f$, where @f$ \beta @f$
  // is the moving_average_fraction parameter.
  optional float moving_average_fraction = 2 [default = .999];
  // Small value to add to the variance estimate so that we don't divide by
  // zero.
  optional float eps = 3 [default = 1e-5];
}
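
Because BatchNorm in Caffe only normalizes and learns no scale or shift itself, it is usually paired with a Scale layer that has bias_term: true; a common in-place sketch (blob names are arbitrary):

layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
  # use_global_stats is usually left unset: false in TRAIN, true in TEST
}
layer {
  name: "scale1"
  type: "Scale"
  bottom: "conv1"
  top: "conv1"
  scale_param { bias_term: true }
}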


message BiasParameter {
  // The first axis of bottom[0] (the first input Blob) along which to apply
  // bottom[1] (the second input Blob). May be negative to index from the end
  // (e.g., -1 for the last axis).
  //
  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
  // top[0] will have the same shape, and bottom[1] may have any of the
  // following shapes (for the given value of axis):
  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
  //    (axis == 1 == -3) 3; 3x40; 3x40x60
  //    (axis == 2 == -2) 40; 40x60
  //    (axis == 3 == -1) 60
  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
  // "axis") -- a scalar bias.
  optional int32 axis = 1 [default = 1];

  // (num_axes is ignored unless just one bottom is given and the bias is
  // a learned parameter of the layer. Otherwise, num_axes is determined by the
  // number of axes by the second bottom.)
  // The number of axes of the input (bottom[0]) covered by the bias
  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
  // Set num_axes := 0, to add a zero-axis Blob: a scalar.
  optional int32 num_axes = 2 [default = 1];

  // (filler is ignored unless just one bottom is given and the bias is
  // a learned parameter of the layer.)
  // The initialization for the learned bias parameter.
  // Default is the zero (0) initialization, resulting in the BiasLayer
  // initially performing the identity operation.
  optional FillerParameter filler = 3;
}


message ContrastiveLossParameter {
  // margin for dissimilar pair
  optional float margin = 1 [default = 1.0];
  // The first implementation of this cost did not exactly match the cost of
  // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2.
  // legacy_version = false (the default) uses (margin - d)^2 as proposed in the
  // Hadsell paper. New models should probably use this version.
  // legacy_version = true uses (margin - d^2). This is kept to support /
  // reproduce existing models and results
  optional bool legacy_version = 2 [default = false];
}


message ConvolutionParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms

  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in all spatial dimensions, or once per spatial dimension.
  repeated uint32 pad = 3; // The padding size; defaults to 0
  repeated uint32 kernel_size = 4; // The kernel size
  repeated uint32 stride = 6; // The stride; defaults to 1
  // Factor used to dilate the kernel, (implicitly) zero-filling the resulting
  // holes. (Kernel dilation is sometimes referred to by its use in the
  // algorithme à trous from Holschneider et al. 1987.)
  repeated uint32 dilation = 18; // The dilation; defaults to 1

  // For 2D convolution only, the *_h and *_w versions may also be used to
  // specify both spatial dimensions.
  optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only)
  optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only)
  optional uint32 kernel_h = 11; // The kernel height (2D only)
  optional uint32 kernel_w = 12; // The kernel width (2D only)
  optional uint32 stride_h = 13; // The stride height (2D only)
  optional uint32 stride_w = 14; // The stride width (2D only)

  optional uint32 group = 5 [default = 1]; // The group size for group conv

  optional FillerParameter weight_filler = 7; // The filler for the weight
  optional FillerParameter bias_filler = 8; // The filler for the bias
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 15 [default = DEFAULT];

  // The axis to interpret as "channels" when performing convolution.
  // Preceding dimensions are treated as independent inputs;
  // succeeding dimensions are treated as "spatial".
  // With (N, C, H, W) inputs, and axis == 1 (the default), we perform
  // N independent 2D convolutions, sliding C-channel (or (C/g)-channel, for
  // groups g>1) filters across the spatial axes (H, W) of the input.
  // With (N, C, D, H, W) inputs, and axis == 1, we perform
  // N independent 3D convolutions, sliding (C/g)-channel
  // filters across the spatial axes (D, H, W) of the input.
  optional int32 axis = 16 [default = 1];

  // Whether to force use of the general ND convolution, even if a specific
  // implementation for blobs of the appropriate number of spatial dimensions
  // is available. (Currently, there is only a 2D-specific convolution
  // implementation; for input blobs with num_axes != 2, this option is
  // ignored and the ND implementation will be used.)
  optional bool force_nd_im2col = 17 [default = false];
}
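
A representative convolution layer in prototxt (values are illustrative, AlexNet-style):

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}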


message CropParameter {
  // To crop, elements of the first bottom are selected to fit the dimensions
  // of the second, reference bottom. The crop is configured by
  // - the crop `axis` to pick the dimensions for cropping
  // - the crop `offset` to set the shift for all/each dimension
  // to align the cropped bottom with the reference bottom.
  // All dimensions up to but excluding `axis` are preserved, while
  // the dimensions including and trailing `axis` are cropped.
  // If only one `offset` is set, then all dimensions are offset by this amount.
  // Otherwise, the number of offsets must equal the number of cropped axes to
  // shift the crop in each dimension accordingly.
  // Note: standard dimensions are N,C,H,W so the default is a spatial crop,
  // and `axis` may be negative to index from the end (e.g., -1 for the last
  // axis).
  optional int32 axis = 1 [default = 2];
  repeated uint32 offset = 2;
}


message DataParameter {
  enum DB {
    LEVELDB = 0;
    LMDB = 1;
  }
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // The rand_skip variable is for the data layer to skip a few data points
  // to avoid all asynchronous sgd clients to start at the same point. The skip
  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
  // be larger than the number of keys in the database.
  // DEPRECATED. Each solver accesses a different subset of the database.
  optional uint32 rand_skip = 7 [default = 0];
  optional DB backend = 8 [default = LEVELDB];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  // Force the encoded image to have 3 color channels
  optional bool force_encoded_color = 9 [default = false];
  // Prefetch queue (increase if data feeding bandwidth varies, within the
  // limit of device memory for GPU training)
  optional uint32 prefetch = 10 [default = 4];
}
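
A minimal LMDB-backed data layer (the source path follows the standard MNIST example; adjust to your own database):

layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  data_param {
    source: "examples/mnist/mnist_train_lmdb"
    batch_size: 64
    backend: LMDB
  }
}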


message DropoutParameter {
  optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio
}


// DummyDataLayer fills any number of arbitrarily shaped blobs with random
// (or constant) data generated by "Fillers" (see "message FillerParameter").
message DummyDataParameter {
  // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N
  // shape fields, and 0, 1 or N data_fillers.
  //
  // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used.
  // If 1 data_filler is specified, it is applied to all top blobs. If N are
  // specified, the ith is applied to the ith top blob.
  repeated FillerParameter data_filler = 1;
  repeated BlobShape shape = 6;

  // 4D dimensions -- deprecated. Use "shape" instead.
  repeated uint32 num = 2;
  repeated uint32 channels = 3;
  repeated uint32 height = 4;
  repeated uint32 width = 5;
}


message EltwiseParameter {
  enum EltwiseOp {
    PROD = 0;
    SUM = 1;
    MAX = 2;
  }
  optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation
  repeated float coeff = 2; // blob-wise coefficient for SUM operation

  // Whether to use an asymptotically slower (for >2 inputs) but stabler method
  // of computing the gradient for the PROD operation. (No effect for SUM op.)
  optional bool stable_prod_grad = 3 [default = true];
}


// Message that stores parameters used by ELULayer
message ELUParameter {
  // Described in:
  // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate
  // Deep Network Learning by Exponential Linear Units (ELUs). arXiv
  optional float alpha = 1 [default = 1];
}


// Message that stores parameters used by EmbedLayer
message EmbedParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  // The input is given as integers to be interpreted as one-hot
  // vector indices with dimension num_input. Hence num_input should be
  // 1 greater than the maximum possible input value.
  optional uint32 input_dim = 2;

  optional bool bias_term = 3 [default = true]; // Whether to use a bias term
  optional FillerParameter weight_filler = 4; // The filler for the weight
  optional FillerParameter bias_filler = 5; // The filler for the bias
}


// Message that stores parameters used by ExpLayer
message ExpParameter {
  // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = exp(shift + scale * x).
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}


/// Message that stores parameters used by FlattenLayer
message FlattenParameter {
  // The first axis to flatten: all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 1 [default = 1];

  // The last axis to flatten: all following axes are retained in the output.
  // May be negative to index from the end (e.g., the default -1 for the last
  // axis).
  optional int32 end_axis = 2 [default = -1];
}


// Message that stores parameters used by HDF5DataLayer
message HDF5DataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 2;

  // Specify whether to shuffle the data.
  // If shuffle == true, the ordering of the HDF5 files is shuffled,
  // and the ordering of data within any given HDF5 file is shuffled,
  // but data between different files are not interleaved; all of a file's
  // data are output (in a random order) before moving onto another file.
  optional bool shuffle = 3 [default = false];
}


message HDF5OutputParameter {
  optional string file_name = 1;
}


message HingeLossParameter {
  enum Norm {
    L1 = 1;
    L2 = 2;
  }
  // Specify the Norm to use L1 or L2
  optional Norm norm = 1 [default = L1];
}


message ImageDataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4 [default = 1];
  // The rand_skip variable is for the data layer to skip a few data points
  // to avoid all asynchronous sgd clients to start at the same point. The skip
  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
  // be larger than the number of keys in the database.
  optional uint32 rand_skip = 7 [default = 0];
  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  optional bool shuffle = 8 [default = false];
  // It will also resize images if new_height or new_width are not zero.
  optional uint32 new_height = 9 [default = 0];
  optional uint32 new_width = 10 [default = 0];
  // Specify if the images are color or gray
  optional bool is_color = 11 [default = true];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  optional string root_folder = 12 [default = ""];
}
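
An ImageData layer reads a text file where each line holds an image path and a label; a sketch with placeholder paths:

layer {
  name: "data"
  type: "ImageData"
  top: "data"
  top: "label"
  image_data_param {
    source: "data/example/train.txt"     # each line: <image path> <label>
    root_folder: "data/example/images/"
    batch_size: 32
    shuffle: true
    new_height: 256
    new_width: 256
  }
}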


message InfogainLossParameter {
  // Specify the infogain matrix source.
  optional string source = 1;
}


message InnerProductParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 3; // The filler for the weight
  optional FillerParameter bias_filler = 4; // The filler for the bias

  // The first axis to be lumped into a single inner product computation;
  // all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 5 [default = 1];
  // Specify whether to transpose the weight matrix or not.
  // If transpose == true, any operations will be performed on the transpose
  // of the weight matrix. The weight matrix itself is not going to be transposed
  // but rather the transfer flag of operations will be toggled accordingly.
  optional bool transpose = 6 [default = false];
}
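
A fully connected layer then looks like this (illustrative values):

layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  inner_product_param {
    num_output: 4096
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" value: 0.1 }
  }
}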


message InputParameter {
  // This layer produces N >= 1 top blob(s) to be assigned manually.
  // Define N shapes to set a shape for each top.
  // Define 1 shape to set the same shape for every top.
  // Define no shape to defer to reshaping manually.
  repeated BlobShape shape = 1;
}


// Message that stores parameters used by LogLayer
message LogParameter {
  // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = ln(shift + scale * x) = log_e(shift + scale * x)
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}


// Message that stores parameters used by LRNLayer
message LRNParameter {
  optional uint32 local_size = 1 [default = 5];
  optional float alpha = 2 [default = 1.];
  optional float beta = 3 [default = 0.75];
  enum NormRegion {
    ACROSS_CHANNELS = 0;
    WITHIN_CHANNEL = 1;
  }
  optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS];
  optional float k = 5 [default = 1.];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}


message MemoryDataParameter {
  optional uint32 batch_size = 1;
  optional uint32 channels = 2;
  optional uint32 height = 3;
  optional uint32 width = 4;
}


message MVNParameter {
  // This parameter can be set to false to normalize mean only
  optional bool normalize_variance = 1 [default = true];

  // This parameter can be set to true to perform DNN-like MVN
  optional bool across_channels = 2 [default = false];

  // Epsilon for not dividing by zero while normalizing variance
  optional float eps = 3 [default = 1e-9];
}


message ParameterParameter {
  optional BlobShape shape = 1;
}


message PoolingParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 1 [default = MAX]; // The pooling method
  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in height and width or as Y, X pairs.
  optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X)
  optional uint32 pad_h = 9 [default = 0]; // The padding height
  optional uint32 pad_w = 10 [default = 0]; // The padding width
  optional uint32 kernel_size = 2; // The kernel size (square)
  optional uint32 kernel_h = 5; // The kernel height
  optional uint32 kernel_w = 6; // The kernel width
  optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X)
  optional uint32 stride_h = 7; // The stride height
  optional uint32 stride_w = 8; // The stride width
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 11 [default = DEFAULT];
  // If global_pooling then it will pool over the size of the bottom by doing
  // kernel_h = bottom->height and kernel_w = bottom->width
  optional bool global_pooling = 12 [default = false];
}
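
A typical max-pooling layer (illustrative values):

layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}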


message PowerParameter {
  // PowerLayer computes outputs y = (shift + scale * x) ^ power.
  optional float power = 1 [default = 1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}


message PythonParameter {
  optional string module = 1;
  optional string layer = 2;
  // This value is set to the attribute `param_str` of the `PythonLayer` object
  // in Python before calling the `setup()` method. This could be a number,
  // string, dictionary in Python dict format, JSON, etc. You may parse this
  // string in `setup` method and use it in `forward` and `backward`.
  optional string param_str = 3 [default = ''];
  // Whether this PythonLayer is shared among worker solvers during data parallelism.
  // If true, each worker solver sequentially runs forward from this layer.
  // This value should be set true if you are using it as a data layer.
  optional bool share_in_parallel = 4 [default = false];
}


// Message that stores parameters used by RecurrentLayer
message RecurrentParameter {
  // The dimension of the output (and usually hidden state) representation --
  // must be explicitly set to non-zero.
  optional uint32 num_output = 1 [default = 0];

  optional FillerParameter weight_filler = 2; // The filler for the weight
  optional FillerParameter bias_filler = 3; // The filler for the bias

  // Whether to enable displaying debug_info in the unrolled recurrent net.
  optional bool debug_info = 4 [default = false];

  // Whether to add as additional inputs (bottoms) the initial hidden state
  // blobs, and add as additional outputs (tops) the final timestep hidden state
  // blobs. The number of additional bottom/top blobs required depends on the
  // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs.
  optional bool expose_hidden = 5 [default = false];
}


// Message that stores parameters used by ReductionLayer
message ReductionParameter {
  enum ReductionOp {
    SUM = 1;
    ASUM = 2;
    SUMSQ = 3;
    MEAN = 4;
  }

  optional ReductionOp operation = 1 [default = SUM]; // reduction operation

  // The first axis to reduce to a scalar -- may be negative to index from the
  // end (e.g., -1 for the last axis).
  // (Currently, only reduction along ALL "tail" axes is supported; reduction
  // of axis M through N, where N < num_axes - 1, is unsupported.)
  // Suppose we have an n-axis bottom Blob with shape:
  //     (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)).
  // If axis == m, the output Blob will have shape
  //     (d0, d1, d2, ..., d(m-1)),
  // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1))
  // times, each including (dm * d(m+1) * ... * d(n-1)) individual data.
  // If axis == 0 (the default), the output Blob always has the empty shape
  // (count 1), performing reduction across the entire input --
  // often useful for creating new loss functions.
  optional int32 axis = 2 [default = 0];

  optional float coeff = 3 [default = 1.0]; // coefficient for output
}


// Message that stores parameters used by ReLULayer
message ReLUParameter {
  // Allow non-zero slope for negative inputs to speed up optimization
  // Described in:
  // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities
  // improve neural network acoustic models. In ICML Workshop on Deep Learning
  // for Audio, Speech, and Language Processing.
  optional float negative_slope = 1 [default = 0];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 2 [default = DEFAULT];
}


message ReshapeParameter {
  // Specify the output dimensions. If some of the dimensions are set to 0,
  // the corresponding dimension from the bottom layer is used (unchanged).
  // Exactly one dimension may be set to -1, in which case its value is
  // inferred from the count of the bottom blob and the remaining dimensions.
  // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8:
  //
  //   layer {
  //     type: "Reshape" bottom: "input" top: "output"
  //     reshape_param { ... }
  //   }
  //
  // If "input" is 2D with shape 2 x 8, then the following reshape_param
  // specifications are all equivalent, producing a 3D blob "output" with shape
  // 2 x 2 x 4:
  //
  //   reshape_param { shape { dim:  2  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim: -1 } }
  //   reshape_param { shape { dim:  0  dim: -1  dim:  4 } }
  //
  optional BlobShape shape = 1;

  // axis and num_axes control the portion of the bottom blob's shape that are
  // replaced by (included in) the reshape. By default (axis == 0 and
  // num_axes == -1), the entire bottom blob shape is included in the reshape,
  // and hence the shape field must specify the entire output shape.
  //
  // axis may be non-zero to retain some portion of the beginning of the input
  // shape (and may be negative to index from the end; e.g., -1 to begin the
  // reshape after the last axis, including nothing in the reshape,
  // -2 to include only the last axis, etc.).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are all equivalent,
  // producing a blob "output" with shape 2 x 2 x 4:
  //
  //   reshape_param { shape { dim: 2  dim: 2  dim: 4 } }
  //   reshape_param { shape { dim: 2  dim: 4 } axis:  1 }
  //   reshape_param { shape { dim: 2  dim: 4 } axis: -3 }
  //
  // num_axes specifies the extent of the reshape.
  // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on
  // input axes in the range [axis, axis+num_axes].
  // num_axes may also be -1, the default, to include all remaining axes
  // (starting from axis).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are equivalent,
  // producing a blob "output" with shape 1 x 2 x 8.
  //
  //   reshape_param { shape { dim:  1  dim: 2  dim:  8 } }
  //   reshape_param { shape { dim:  1  dim: 2 } num_axes: 1 }
  //   reshape_param { shape { dim:  1 } num_axes: 0 }
  //
  // On the other hand, these would produce output blob shape 2 x 1 x 8:
  //
  //   reshape_param { shape { dim: 2  dim: 1  dim: 8 } }
  //   reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 }
  //
  optional int32 axis = 2 [default = 0];
  optional int32 num_axes = 3 [default = -1];
}


message ScaleParameter {
  // The first axis of bottom[0] (the first input Blob) along which to apply
  // bottom[1] (the second input Blob). May be negative to index from the end
  // (e.g., -1 for the last axis).
  //
  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
  // top[0] will have the same shape, and bottom[1] may have any of the
  // following shapes (for the given value of axis):
  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
  //    (axis == 1 == -3) 3; 3x40; 3x40x60
  //    (axis == 2 == -2) 40; 40x60
  //    (axis == 3 == -1) 60
  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
  // "axis") -- a scalar multiplier.
  optional int32 axis = 1 [default = 1];

  // (num_axes is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer. Otherwise, num_axes is determined by the
  // number of axes by the second bottom.)
  // The number of axes of the input (bottom[0]) covered by the scale
  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
  // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar.
  optional int32 num_axes = 2 [default = 1];

  // (filler is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer.)
  // The initialization for the learned scale parameter.
  // Default is the unit (1) initialization, resulting in the ScaleLayer
  // initially performing the identity operation.
  optional FillerParameter filler = 3;

  // Whether to also learn a bias (equivalent to a ScaleLayer + BiasLayer, but
  // may be more efficient). Initialized with bias_filler (defaults to 0).
  optional bool bias_term = 4 [default = false];
  optional FillerParameter bias_filler = 5;
}


message SigmoidParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}


message SliceParameter {
  // The axis along which to slice -- may be negative to index from the end
  // (e.g., -1 for the last axis).
  // By default, SliceLayer slices blobs along the "channels" axis (1).
  optional int32 axis = 3 [default = 1];
  repeated uint32 slice_point = 2;

  // DEPRECATED: alias for "axis" -- does not support negative indexing.
  optional uint32 slice_dim = 1 [default = 1];
}


// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer
message SoftmaxParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];

  // The axis along which to perform the softmax -- may be negative to index
  // from the end (e.g., -1 for the last axis).
  // Any other axes will be evaluated as independent softmaxes.
  optional int32 axis = 2 [default = 1];
}


message TanHParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}


// Message that stores parameters used by TileLayer
message TileParameter {
  // The index of the axis to tile.
  optional int32 axis = 1 [default = 1];

  // The number of copies (tiles) of the blob to output.
  optional int32 tiles = 2;
}


// Message that stores parameters used by ThresholdLayer
message ThresholdParameter {
  optional float threshold = 1 [default = 0]; // Strictly positive values
}


message WindowDataParameter {
  // Specify the data source.
  optional string source = 1;
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 6 [default = false];
  // Foreground (object) overlap threshold
  optional float fg_threshold = 7 [default = 0.5];
  // Background (non-object) overlap threshold
  optional float bg_threshold = 8 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float fg_fraction = 9 [default = 0.25];
  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 context_pad = 10 [default = 0];
  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string crop_mode = 11 [default = "warp"];
  // cache_images: will load all images in memory for faster access
  optional bool cache_images = 12 [default = false];
  // append root_folder to locate images
  optional string root_folder = 13 [default = ""];
}


message SPPParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional uint32 pyramid_height = 1;
  optional PoolMethod pool = 2 [default = MAX]; // The pooling method
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}


// DEPRECATED: use LayerParameter.
message V1LayerParameter {
  repeated string bottom = 2;
  repeated string top = 3;
  optional string name = 4;
  repeated NetStateRule include = 32;
  repeated NetStateRule exclude = 33;
  enum LayerType {
    NONE = 0;
    ABSVAL = 35;
    ACCURACY = 1;
    ARGMAX = 30;
    BNLL = 2;
    CONCAT = 3;
    CONTRASTIVE_LOSS = 37;
    CONVOLUTION = 4;
    DATA = 5;
    DECONVOLUTION = 39;
    DROPOUT = 6;
    DUMMY_DATA = 32;
    EUCLIDEAN_LOSS = 7;
    ELTWISE = 25;
    EXP = 38;
    FLATTEN = 8;
    HDF5_DATA = 9;
    HDF5_OUTPUT = 10;
    HINGE_LOSS = 28;
    IM2COL = 11;
    IMAGE_DATA = 12;
    INFOGAIN_LOSS = 13;
    INNER_PRODUCT = 14;
    LRN = 15;
    MEMORY_DATA = 29;
    MULTINOMIAL_LOGISTIC_LOSS = 16;
    MVN = 34;
    POOLING = 17;
    POWER = 26;
    RELU = 18;
    SIGMOID = 19;
    SIGMOID_CROSS_ENTROPY_LOSS = 27;
    SILENCE = 36;
    SOFTMAX = 20;
    SOFTMAX_LOSS = 21;
    SPLIT = 22;
    SLICE = 33;
    TANH = 23;
    WINDOW_DATA = 24;
    THRESHOLD = 31;
  }
  optional LayerType type = 5;
  repeated BlobProto blobs = 6;
  repeated string param = 1001;
  repeated DimCheckMode blob_share_mode = 1002;
  enum DimCheckMode {
    STRICT = 0;
    PERMISSIVE = 1;
  }
  repeated float blobs_lr = 7;
  repeated float weight_decay = 8;
  repeated float loss_weight = 35;
  optional AccuracyParameter accuracy_param = 27;
  optional ArgMaxParameter argmax_param = 23;
  optional ConcatParameter concat_param = 9;
  optional ContrastiveLossParameter contrastive_loss_param = 40;
  optional ConvolutionParameter convolution_param = 10;
  optional DataParameter data_param = 11;
  optional DropoutParameter dropout_param = 12;
  optional DummyDataParameter dummy_data_param = 26;
  optional EltwiseParameter eltwise_param = 24;
  optional ExpParameter exp_param = 41;
  optional HDF5DataParameter hdf5_data_param = 13;
  optional HDF5OutputParameter hdf5_output_param = 14;
  optional HingeLossParameter hinge_loss_param = 29;
  optional ImageDataParameter image_data_param = 15;
  optional InfogainLossParameter infogain_loss_param = 16;
  optional InnerProductParameter inner_product_param = 17;
  optional LRNParameter lrn_param = 18;
  optional MemoryDataParameter memory_data_param = 22;
  optional MVNParameter mvn_param = 34;
  optional PoolingParameter pooling_param = 19;
  optional PowerParameter power_param = 21;
  optional ReLUParameter relu_param = 30;
  optional SigmoidParameter sigmoid_param = 38;
  optional SoftmaxParameter softmax_param = 39;
  optional SliceParameter slice_param = 31;
  optional TanHParameter tanh_param = 37;
  optional ThresholdParameter threshold_param = 25;
  optional WindowDataParameter window_data_param = 20;
  optional TransformationParameter transform_param = 36;
  optional LossParameter loss_param = 42;
  optional V0LayerParameter layer = 1;
}


// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters
// in Caffe. We keep this message type around for legacy support.
message V0LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the string to specify the layer type

  // Parameters to specify layers with inner products.
  optional uint32 num_output = 3; // The number of outputs for the layer
  optional bool biasterm = 4 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 5; // The filler for the weight
  optional FillerParameter bias_filler = 6; // The filler for the bias

  optional uint32 pad = 7 [default = 0]; // The padding size
  optional uint32 kernelsize = 8; // The kernel size
  optional uint32 group = 9 [default = 1]; // The group size for group conv
  optional uint32 stride = 10 [default = 1]; // The stride
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 11 [default = MAX]; // The pooling method
  optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio

  optional uint32 local_size = 13 [default = 5]; // for local response norm
  optional float alpha = 14 [default = 1.]; // for local response norm
  optional float beta = 15 [default = 0.75]; // for local response norm
  optional float k = 22 [default = 1.];

  // For data layers, specify the data source
  optional string source = 16;
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 17 [default = 1];
  optional string meanfile = 18;
  // For data layers, specify the batch size.
  optional uint32 batchsize = 19;
  // For data layers, specify if we would like to randomly crop an image.
  optional uint32 cropsize = 20 [default = 0];
  // For data layers, specify if we want to randomly mirror data.
  optional bool mirror = 21 [default = false];

  // The blobs containing the numeric parameters of the layer
  repeated BlobProto blobs = 50;
  // The ratio that is multiplied on the global learning rate. If you want to
  // set the learning ratio for one blob, you need to set it for all blobs.
  repeated float blobs_lr = 51;
  // The weight decay that is multiplied on the global weight decay.
  repeated float weight_decay = 52;

  // The rand_skip variable is for the data layer to skip a few data points
  // to avoid all asynchronous sgd clients to start at the same point. The skip
  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
  // be larger than the number of keys in the database.
  optional uint32 rand_skip = 53 [default = 0];

  // Fields related to detection (det_*)
  // foreground (object) overlap threshold
  optional float det_fg_threshold = 54 [default = 0.5];
  // background (non-object) overlap threshold
  optional float det_bg_threshold = 55 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float det_fg_fraction = 56 [default = 0.25];

  // optional bool OBSOLETE_can_clobber = 57 [default = true];

  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 det_context_pad = 58 [default = 0];

  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string det_crop_mode = 59 [default = "warp"];

  // For ReshapeLayer, one needs to specify the new dimensions.
  optional int32 new_num = 60 [default = 0];
  optional int32 new_channels = 61 [default = 0];
  optional int32 new_height = 62 [default = 0];
  optional int32 new_width = 63 [default = 0];

  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  // It will also resize images if new_height or new_width are not zero.
  optional bool shuffle_images = 64 [default = false];

  // For ConcatLayer, one needs to specify the dimension for concatenation, and
  // the other dimensions must be the same for all the bottom blobs.
  // By default it will concatenate blobs along the channels dimension.
  optional uint32 concat_dim = 65 [default = 1];

  optional HDF5OutputParameter hdf5_output_param = 1001;
}


message PReLUParameter {
  // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers:
  // Surpassing Human-Level Performance on ImageNet Classification, 2015.

  // Initial value of a_i. Default is a_i=0.25 for all i.
  optional FillerParameter filler = 1;
  // Whether or not slope parameters are shared across channels.
  optional bool channel_shared = 2 [default = false];
}


