Reference article:
http://www.cnblogs.com/xiaopanlyu/p/5793280.html

A detailed walkthrough of lenet_train_test.prototxt
name: "LeNet"
#============== 1. Define the TRAIN data layer ==============
layer {
  name: "mnist"            # layer name: mnist
  type: "Data"             # layer type: Data
  top: "data"              # produces a data blob
  top: "label"             # produces a label blob
  include {
    phase: TRAIN           # this layer is used only in the TRAIN phase
  }
  transform_param {
    scale: 0.00390625      # normalization factor: 1/256 = 0.00390625,
                           # maps raw pixel values [0, 255] into [0, 1)
  }
  data_param {
    source: "examples/mnist/number_train_leveldb"  # training data path
    batch_size: 64         # process 64 images per training batch
    backend: LEVELDB       # storage backend (Caffe's default), matching the TEST layer below
  }
}
#============== 2. Define the TEST data layer ==============
layer {
  name: "mnist"            # layer name: mnist
  type: "Data"             # layer type: Data
  top: "data"              # produces a data blob
  top: "label"             # produces a label blob
  include {
    phase: TEST            # this layer is used only in the TEST phase
  }
  transform_param {
    scale: 0.00390625      # normalization factor, 1/256
  }
  data_param {
    source: "examples/mnist/number_val_leveldb"  # test data path
    batch_size: 100        # process 100 images per test batch
    backend: LEVELDB
  }
}
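As a quick sanity check on the two data layers, here is a minimal pycaffe sketch. It assumes this file is saved as examples/mnist/lenet_train_test.prototxt and that the leveldb databases exist at the paths above; only layers whose include.phase matches the requested phase are instantiated, so loading with caffe.TEST picks up the TEST data layer (batch_size 100).

import caffe

caffe.set_mode_cpu()

# Build the net in the TEST phase and inspect every blob it produces.
net = caffe.Net('examples/mnist/lenet_train_test.prototxt', caffe.TEST)

for name, blob in net.blobs.items():
    print(name, blob.data.shape)
# Expected: data (100, 1, 28, 28), conv1 (100, 20, 24, 24), ...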
#============== 3. Define the first convolution layer, conv1 ==============
# conv1 data shape: batch_size*1*28*28 -> batch_size*20*24*24
layer {
  name: "conv1"            # layer name: conv1
  type: "Convolution"      # layer type: convolution
  bottom: "data"           # takes the data blob as input
  top: "conv1"             # produces the conv1 blob
  param {
    lr_mult: 1             # weight learning rate multiplier: weights learn at
                           # 1x the base_lr (0.01) set in lenet_solver.prototxt
  }
  param {
    lr_mult: 2             # bias learning rate multiplier: the bias learns at
                           # 2x the base_lr (0.01) set in lenet_solver.prototxt
  }
  convolution_param {
    num_output: 20         # produce 20 output channels
    kernel_size: 5         # 5x5 convolution kernel
    stride: 1              # kernel moves 1 pixel per step
    weight_filler {
      type: "xavier"       # Xavier initialization: scales the initial weights
                           # automatically from the number of input and output neurons
    }
    bias_filler {
      type: "constant"     # initialize the bias to a constant (default 0)
    }
  }
}
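The 28 -> 24 shape change in the comment above follows from the standard convolution output formula. A minimal check in Python (conv_output_size is a hypothetical helper for illustration, not part of Caffe):

def conv_output_size(in_size, kernel_size, stride=1, pad=0):
    # Caffe convolution output: floor((in + 2*pad - kernel) / stride) + 1
    return (in_size + 2 * pad - kernel_size) // stride + 1

assert conv_output_size(28, 5) == 24   # conv1: 28x28 -> 24x24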
#============== 4. Define the first pooling layer, pool1 ==============
# pool1 data shape: batch_size*20*24*24 -> batch_size*20*12*12
layer {
  name: "pool1"
  type: "Pooling"          # layer type: pooling
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX              # max pooling
    kernel_size: 2         # 2x2 pooling window
    stride: 2              # window moves 2 pixels per step
  }
}
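Pooling shapes follow a similar formula, except that Caffe rounds up rather than down (pool_output_size is again a hypothetical helper):

import math

def pool_output_size(in_size, kernel_size, stride, pad=0):
    # Caffe pooling output: ceil((in + 2*pad - kernel) / stride) + 1
    return int(math.ceil((in_size + 2 * pad - kernel_size) / float(stride))) + 1

assert pool_output_size(24, 2, 2) == 12   # pool1: 24x24 -> 12x12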
#============== 5. Define the second convolution layer, conv2 ==============
# conv2 data shape: batch_size*20*12*12 -> batch_size*50*8*8
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  convolution_param {
    num_output: 50
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
#============== 6. Define the second pooling layer, pool2 ==============
# pool2 data shape: batch_size*50*8*8 -> batch_size*50*4*4
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
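Inlining the same conv/pool formulas traces the full spatial pipeline and confirms every shape comment so far:

import math

h = (28 - 5) // 1 + 1                   # conv1: 28 -> 24
h = int(math.ceil((h - 2) / 2.0)) + 1   # pool1: 24 -> 12
h = (h - 5) // 1 + 1                    # conv2: 12 -> 8
h = int(math.ceil((h - 2) / 2.0)) + 1   # pool2:  8 -> 4
print(h)                                # 4, so pool2 outputs batch_size*50*4*4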
#============== 7. Define the first fully connected layer, ip1 ==============
# ip1 data shape: batch_size*50*4*4 -> batch_size*500*1*1
layer {
  name: "ip1"
  type: "InnerProduct"     # layer type: fully connected (inner product)
  bottom: "pool2"
  top: "ip1"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
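The InnerProduct layer flattens each 50*4*4 = 800-element input and computes y = Wx + b. A NumPy sketch of the same computation (random values stand in for the learned weights):

import numpy as np

batch_size = 64
x = np.random.rand(batch_size, 50, 4, 4)   # pool2 output
x = x.reshape(batch_size, -1)              # flatten to (64, 800)

W = np.random.randn(500, 800) * 0.01       # weights: num_output x input_dim
b = np.zeros(500)                          # bias, constant-filled with 0

y = x.dot(W.T) + b
print(y.shape)                             # (64, 500), matching ip1's top blob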
#============== 8. Define the first ReLU layer ==============
layer {
  name: "relu1"
  type: "ReLU"             # layer type: ReLU activation
  bottom: "ip1"            # bottom and top are both ip1: the ReLU is
  top: "ip1"               # computed in place, which saves memory
}
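The activation itself is just an elementwise max with zero:

import numpy as np

ip1 = np.array([-1.5, 0.0, 2.3])
print(np.maximum(0, ip1))   # [0.  0.  2.3]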
#============== 9. Define the second fully connected layer, ip2 ==============
# ip2 data shape: batch_size*500*1*1 -> batch_size*10*1*1 (one score per digit class)
layer {
  name: "ip2"
  type: "InnerProduct"
  bottom: "ip1"
  top: "ip2"
  param {
    lr_mult: 1
  }
  param {
    lr_mult: 2
  }
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
#============== 10. Define the accuracy layer ==============
layer {
  name: "accuracy"
  type: "Accuracy"         # reports classification accuracy; used only at TEST time
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
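The Accuracy layer counts how often the highest ip2 score matches the label. An equivalent NumPy sketch (accuracy here is a hypothetical stand-in for the layer, shown with 3 classes for brevity):

import numpy as np

def accuracy(scores, labels):
    # scores: (batch, num_classes) raw ip2 outputs; labels: (batch,) ground truth
    return np.mean(np.argmax(scores, axis=1) == labels)

scores = np.array([[0.1, 2.0, -1.0], [3.0, 0.0, 0.5]])
labels = np.array([1, 0])
print(accuracy(scores, labels))   # 1.0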
#============== 11. Define the loss layer ==============
layer {
  name: "loss"
  type: "SoftmaxWithLoss"  # softmax followed by multinomial logistic (cross-entropy) loss
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
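SoftmaxWithLoss fuses a softmax over the class scores with the average negative log-likelihood of the true classes. A NumPy sketch of the same computation (softmax_with_loss is a hypothetical helper, again shown with 3 classes):

import numpy as np

def softmax_with_loss(scores, labels):
    # Shift by the row max for numerical stability, then take the softmax.
    shifted = scores - scores.max(axis=1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    # Average negative log-probability of each sample's true class.
    return -np.mean(np.log(probs[np.arange(len(labels)), labels]))

scores = np.array([[2.0, 0.5, -1.0], [0.1, 3.0, 0.2]])
labels = np.array([0, 1])
print(softmax_with_loss(scores, labels))   # a small positive loss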