# File: caffe/examples/mnist/lenet_solver.prototxt
# The train/test net protocol buffer definition:
# the network description file, in ProtoBuffer text format.
net: "examples/mnist/lenet_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 500 training iterations.
test_interval: 500
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.01
momentum: 0.9
weight_decay: 0.0005
# The learning rate policy (decay schedule).
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# Print a log line to the screen every 100 iterations.
display: 100
# The maximum number of training iterations.
max_iter: 10000
# Snapshot intermediate results every 5000 iterations.
snapshot: 5000
snapshot_prefix: "examples/mnist/lenet"
# Solver mode: CPU or GPU.
solver_mode: CPU
# File: caffe/examples/mnist/lenet_train_test.prototxt
name: "LeNet"  # network name
# Data layer "mnist": outputs two blobs, "data" and "label".
# This instance is active only during the TRAIN phase.
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    scale: 0.00390625  # pixel scaling factor, 1/256
  }
  data_param {
    source: "examples/mnist/mnist_train_lmdb"  # LMDB data source path
    batch_size: 64  # read 64 images per batch
    backend: LMDB  # data storage backend
  }
}
# A second data layer, also named "mnist", with the same "data"/"label"
# top blobs; this one is active only during the TEST phase.
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  transform_param {
    scale: 0.00390625  # pixel scaling factor, 1/256
  }
  data_param {
    source: "examples/mnist/mnist_test_lmdb"  # LMDB data source path
    batch_size: 100  # 100 images per test batch
    backend: LMDB  # data storage backend
  }
}
# Convolution layer "conv1": bottom blob "data", top blob "conv1".
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1  # weight learning-rate multiplier (1 = same as global rate)
  }
  param {
    lr_mult: 2  # bias learning-rate multiplier (2x the global rate)
  }
  convolution_param {
    num_output: 20  # number of output feature maps
    kernel_size: 5  # convolution kernel size
    stride: 1  # convolution stride
    weight_filler {
      type: "xavier"  # weight initialization strategy
    }
    bias_filler {
      type: "constant"  # bias initialization strategy
    }
  }
}
# Pooling (downsampling) layer "pool1": bottom blob "conv1", top blob "pool1".
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX  # pooling method
    kernel_size: 2  # pooling window size
    stride: 2  # pooling window stride
  }
}
# Convolution layer "conv2": bottom blob "pool1", top blob "conv2".
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1  # weight learning-rate multiplier (1 = same as global rate)
}
param {
lr_mult: 2  # bias learning-rate multiplier (2x the global rate)
}
convolution_param {
num_output: 50  # number of output feature maps
kernel_size: 5  # convolution kernel size
stride: 1  # convolution stride
weight_filler {
type: "xavier"  # weight initialization strategy
}
bias_filler {
type: "constant"  # bias initialization strategy
}
}
}
# Pooling (downsampling) layer "pool2": bottom blob "conv2", top blob "pool2".
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX  # pooling method
kernel_size: 2  # pooling window size
stride: 2  # pooling window stride
}
}
# Fully connected (InnerProduct) layer "ip1":
# bottom blob "pool2", top blob "ip1".
layer {
  name: "ip1"
  type: "InnerProduct"
  bottom: "pool2"
  top: "ip1"
  param {
    lr_mult: 1  # weight learning-rate multiplier (1 = same as global rate)
  }
  param {
    lr_mult: 2  # bias learning-rate multiplier (2x the global rate)
  }
  inner_product_param {
    num_output: 500  # number of output neurons
    weight_filler {
      type: "xavier"  # weight initialization strategy
    }
    bias_filler {
      type: "constant"  # bias initialization strategy
    }
  }
}
# ReLU nonlinearity layer.
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "ip1"
  top: "ip1"  # top same as bottom: computed in place
}
# Fully connected (InnerProduct) layer "ip2": bottom blob "ip1",
# top blob "ip2".
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1  # weight learning-rate multiplier (1 = same as global rate)
}
param {
lr_mult: 2  # bias learning-rate multiplier (2x the global rate)
}
inner_product_param {
num_output: 10  # number of output neurons (10 classes)
weight_filler {
type: "xavier"  # weight initialization strategy
}
bias_filler {
type: "constant"  # bias initialization strategy
}
}
}
# Accuracy layer: computes classification accuracy from blobs "ip2"
# and "label". Active only during the TEST phase.
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
# Loss layer: SoftmaxWithLoss over bottom blobs "ip2" and "label",
# top blob "loss".
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
# lenet_train_test.prototxt 可视化
# 可视化工具网页:http://ethereon.github.io/netscope/#/editor
# 在左边输入 prototxt 文本,按下 Shift+Enter,右侧会输出网络可视化图像