After compiling Caffe on Windows 7 and running its tests, the natural next step is to run your own data through it. Plenty of people have written about this topic, but when I actually tried it myself I still hit some friction, so I am recording my workflow here, mainly as notes for myself; if it helps anyone else, all the better.
1 Goal
The network used here is Caffe's reproduction of the network from the groundbreaking 2012 paper, ImageNet Classification with Deep Convolutional Neural Networks. If you modify it to suit your own image data, you may well get even better results.
2 Walkthrough
1〉First, sort out your directories. My Caffe root is M:\caffe-windows\caffe
2〉In the root, create a folder mytest to hold your own network model and solver.
It contains two scripts, one for computing the mean (a duplicate of the one under data) and one for running training and testing, plus solver.prototxt, which is of course the solver, and train_val.prototxt, the network definition used for training and testing.
3〉Under /data/, create a folder mydata to hold your own image samples.
Inside it I created two subfolders, train and val, for the training samples and the validation samples respectively; the overall layout is sketched below.
ps: this folder also holds my scripts for generating the lmdb databases and computing the mean.
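For reference, this is the layout the following steps assume, reconstructed from the paths used in this post (the .bat file names are placeholders of my own, not fixed names):

M:\caffe-windows\caffe\
    mytest\
        train_val.prototxt       (network definition, step 6)
        solver.prototxt          (solver, step 7)
        mytest.bat               (launch script, step 8)
    data\
        mydata\
            train\               (training images)
            val\                 (validation images)
            train.txt
            val.txt
            convert_train.bat, convert_val.bat, mean.bat   (the scripts from steps 4 and 5)
            my_train_lmdb\       (generated in step 4)
            my_val_lmdb\         (generated in step 4)
            my_mean.binaryproto  (generated in step 5)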
4〉First we generate the lmdb databases.
SET GLOG_logtostderr=1
..\..\Build\x64\Debug\convert_imageset.exe train\ train.txt my_train_lmdb 0
pause
Save the above as a .bat file and it is ready to run; relative paths are fine. This script generates the lmdb for train.
val needs one as well, adapted from the above:
SET GLOG_logtostderr=1
..\..\Build\x64\Debug\convert_imageset.exe val\ val.txt my_val_lmdb 0
pause
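convert_imageset reads the list file one image per line: a path relative to the root folder given as the first argument (train\ or val\ here), a space, then an integer class label. The file names below are made-up placeholders, and in this build the trailing 0 on the command line appears to be the random-shuffle flag (0 = keep the list order):

cat_001.jpg 0
cat_002.jpg 0
dog_001.jpg 1
dog_002.jpg 1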
5〉Generate the mean file
SET GLOG_logtostderr=1
..\..\Build\x64\Debug\compute_image_mean.exe my_train_lmdb my_mean.binaryproto
pause
In each of the scripts above, the last argument is the name of the file or database to be generated.
6〉Prepare train_val.prototxt
Create it directly in the mytest folder with the following contents:
name: "CaffeNet"
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "../data/mydata/my_mean.binaryproto"
}
data_param {
source: "M:/caffe-windows/caffe/data/mydata/my_train_lmdb"
batch_size: 256
backend: LMDB
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mirror: false
crop_size: 227
mean_file: "../data/mydata/my_mean.binaryproto"
}
data_param {
source: "M:/caffe-windows/caffe/data/mydata/my_val_lmdb"
batch_size: 50
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "norm1"
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "norm1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "norm2"
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "norm2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1000
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
I have not changed the network itself here. If you use leveldb instead of lmdb, you need to change the backend: LMDB lines and the corresponding source paths in the two data layers. Note also that num_output: 1000 in fc8 matches the 1000 ImageNet classes; for your own data it should equal your number of classes.
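As a reference point, here is a minimal sketch of what the TRAIN data layer's data_param would look like with leveldb; my_train_leveldb is an assumed name, and the databases themselves would of course have to be generated as leveldb rather than lmdb:

data_param {
  source: "M:/caffe-windows/caffe/data/mydata/my_train_leveldb"
  batch_size: 256
  backend: LEVELDB
}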
7〉Write the solver, solver.prototxt
This was copied from the models folder and lightly modified; it is essentially the example that ships with Caffe.
net: "M:/caffe-windows/caffe/mytest/train_val.prototxt"
test_iter: 1000
test_interval: 1000
base_lr: 0.01
lr_policy: "step"
gamma: 0.1
stepsize: 100000
display: 20
max_iter: 450000
momentum: 0.9
weight_decay: 0.0005
snapshot: 10000
snapshot_prefix: "M:/caffe-windows/caffe/mytest/mynet_train"
solver_mode: GPU
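These values are inherited from the ImageNet example and sized for its 1.2 million training images. One setting worth checking against your own data: Caffe evaluates test_iter × TEST-phase batch_size images per test pass, here 1000 × 50 = 50,000, which is the ILSVRC validation-set size. If your val set is smaller, say 1,000 images, shrink test_iter so the product matches:

test_iter: 20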
8〉Prepare the launch script, mytest.bat
REM the LOG=... line that used to be here was bash syntax (backticks, date +%...) left over from Caffe's Linux scripts and fails under cmd, so it is dropped
..\Build\x64\Debug\caffe.exe train --solver solver.prototxt
pause
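If a log file like the one that bash line was after is wanted, cmd redirection gives a rough equivalent; a minimal sketch (glog writes its output to stderr, hence the 2>&1):

..\Build\x64\Debug\caffe.exe train --solver solver.prototxt > train.log 2>&1
pause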
9〉The order of operations
Run the following in sequence:
1 convert the images to lmdb
2 compute the mean
3 run caffe
If caffe starts up and the training log begins to scroll, your configuration is correct; now just wait for the results.