Steps for training a network with Caffe

0: Label the images with class IDs (consecutive natural numbers starting from 0)
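
The labels live in the listing files that convert_imageset reads in the next step (train.txt and val.txt under LABEL_ROOT). Each line holds an image path relative to the data root followed by its class label; the filenames below are made up purely for illustration:

cat_0001.jpg 0
cat_0002.jpg 0
dog_0001.jpg 1
dog_0002.jpg 1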

1: First, convert the images into the data format Caffe needs (LMDB here):

#!/usr/bin/env sh
# Create the imagenet lmdb inputs
# N.B. set the path to the imagenet train + val data dirs

# EXAMPLE=examples/imagenet
# DATA=data/ilsvrc12
TOOLS=build/tools

# TRAIN_DATA_ROOT=/path/to/imagenet/train/
# VAL_DATA_ROOT=/path/to/imagenet/val/
TRAIN_DATA_ROOT=/examples/jb/train/
VAL_DATA_ROOT=/examples/jb/val/
LABEL_ROOT=/examples/jb
SAVE_DATA_ROOT=/examples/jb/data

# Set RESIZE=true to resize the images (to 32x32 here). Leave as false if
# images have already been resized using another tool.
# RESIZE=false
RESIZE=true
if $RESIZE; then
  RESIZE_HEIGHT=32
  RESIZE_WIDTH=32
else
  RESIZE_HEIGHT=0
  RESIZE_WIDTH=0
fi

if [ ! -d "$TRAIN_DATA_ROOT" ]; then
  echo "Error: TRAIN_DATA_ROOT is not a path to a directory: $TRAIN_DATA_ROOT"
  echo "Set the TRAIN_DATA_ROOT variable in create_imagenet.sh to the path" \
       "where the ImageNet training data is stored."
  exit 1
fi

if [ ! -d "$VAL_DATA_ROOT" ]; then
  echo "Error: VAL_DATA_ROOT is not a path to a directory: $VAL_DATA_ROOT"
  echo "Set the VAL_DATA_ROOT variable in create_imagenet.sh to the path" \
       "where the ImageNet validation data is stored."
  exit 1
fi

echo "Creating train lmdb..."

GLOG_logtostderr=1 $TOOLS/convert_imageset \
    --resize_height=$RESIZE_HEIGHT \
    --resize_width=$RESIZE_WIDTH \
    --shuffle \
    $TRAIN_DATA_ROOT \
    $LABEL_ROOT/train.txt \
    $SAVE_DATA_ROOT/train_lmdb

echo "Creating val lmdb..."

GLOG_logtostderr=1 $TOOLS/convert_imageset \
    --resize_height=$RESIZE_HEIGHT \
    --resize_width=$RESIZE_WIDTH \
    --shuffle \
    $VAL_DATA_ROOT \
    $LABEL_ROOT/val.txt \
    $SAVE_DATA_ROOT/val_lmdb

echo "Done."


2: Compute the mean image of the training samples (needed for color images); the resulting image_mean.binaryproto is referenced by mean_file in the network definition of step 3.

#!/usr/bin/env sh
# Compute the mean image from the training lmdb created in step 1

./build/tools/compute_image_mean examples/jb/data/train_lmdb \
  examples/jb/data/image_mean.binaryproto

echo "Done."

3: Define the network structure. The definition below uses the old pre-1.0 prototxt syntax (layers blocks with enum types such as CONVOLUTION); newer Caffe releases use layer blocks with quoted string types and auto-upgrade this older format when loading.


name: "AlexNet"
layers {
  name: "data"
  type: DATA
  top: "data"
  top: "label"
  data_param {
    source: "examples/jb/data/train_lmdb"
    backend: LMDB
    batch_size: 256
  }
  transform_param {
    crop_size: 32  # must not exceed the 32x32 resize from step 1
    mean_file: "examples/jb/data/image_mean.binaryproto"
    mirror: true
  }
  include: { phase: TRAIN }
}
layers {
  name: "data"
  type: DATA
  top: "data"
  top: "label"
  data_param {
    source: "examples/jb/data/val_lmdb"
    backend: LMDB
    batch_size: 50
  }
  transform_param {
    crop_size: 32  # must not exceed the 32x32 resize from step 1
    mean_file: "examples/jb/data/image_mean.binaryproto"
    mirror: false
  }
  include: { phase: TEST }
}
layers {
  name: "conv1"
  type: CONVOLUTION
  bottom: "data"
  top: "conv1"
  blobs_lr: 1
  blobs_lr: 2
  weight_decay: 1
  weight_decay: 0
  convolution_param {
    num_output: 64
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layers {
  name: "relu1"
  type: RELU
  bottom: "conv1"
  top: "conv1"
}
layers {
  name: "norm1"
  type: LRN
  bottom: "conv1"
  top: "norm1"
  lrn_param {
    local_size: 9
    alpha: 0.0001
    beta: 0.75
  }
}
layers {
  name: "pool1"
  type: POOLING
  bottom: "norm1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layers {
  name: "conv2"
  type: CONVOLUTION
  bottom: "pool1"
  top: "conv2"
  blobs_lr: 1
  blobs_lr: 2
  weight_decay: 1
  weight_decay: 0
  convolution_param {
    num_output: 64
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layers {
  name: "relu2"
  type: RELU
  bottom: "conv2"
  top: "conv2"
}
layers {
  name: "norm2"
  type: LRN
  bottom: "conv2"
  top: "norm2"
  lrn_param {
    local_size: 9
    alpha: 0.0001
    beta: 0.75
  }
}
layers {
  name: "pool2"
  type: POOLING
  bottom: "norm2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layers {
  name: "conv3"
  type: CONVOLUTION
  bottom: "pool2"
  top: "conv3"
  blobs_lr: 1
  blobs_lr: 2
  weight_decay: 1
  weight_decay: 0
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layers {
  name: "relu3"
  type: RELU
  bottom: "conv3"
  top: "conv3"
}
layers {
  name: "conv4"
  type: CONVOLUTION
  bottom: "conv3"
  top: "conv4"
  blobs_lr: 1
  blobs_lr: 2
  weight_decay: 1
  weight_decay: 0
  convolution_param {
    num_output: 32
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layers {
  name: "relu4"
  type: RELU
  bottom: "conv4"
  top: "conv4"
}
layers {
  name: "fc6"
  type: INNER_PRODUCT
  bottom: "conv4"
  top: "fc6"
  blobs_lr: 1
  blobs_lr: 2
  weight_decay: 1
  weight_decay: 0
  inner_product_param {
    num_output: 43
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layers {
  name: "relu6"
  type: RELU
  bottom: "fc6"
  top: "fc6"
}
layers {
  name: "accuracy"
  type: ACCURACY
  bottom: "fc6"
  bottom: "label"
  top: "accuracy"
  include: { phase: TEST }
}
layers {
  name: "loss"
  type: SOFTMAX_LOSS
  bottom: "fc6"
  bottom: "label"
  top: "loss"
}
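
The solver in the next step expects this definition to be saved as examples/jb/train_val.prototxt. Before launching a long run, one quick sanity check is caffe time, which builds the net from the prototxt and times a few forward/backward passes, catching most definition errors early (the iteration count here is arbitrary):

./build/tools/caffe time \
    --model=examples/jb/train_val.prototxt \
    --iterations=10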


4: Define the solver file (saved as examples/jb/solver.prototxt):

net: "examples/jb/train_val.prototxt"
test_iter: 1000
test_interval: 1000
base_lr: 0.01
lr_policy: "step"
gamma: 0.1
stepsize: 100000
display: 20
max_iter: 450000
momentum: 0.9
weight_decay: 0.0005
snapshot: 10000
snapshot_prefix: "examples/jb/models/caffe_alexnet_train"
solver_mode: GPU
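
For the "step" policy Caffe computes lr = base_lr * gamma^floor(iter / stepsize), so the learning rate stays at 0.01 for the first 100,000 iterations, drops to 0.001, then 0.0001, and so on until max_iter. Likewise, each test pass runs test_iter batches of the TEST-phase batch_size, i.e. 1000 * 50 = 50,000 images, so test_iter should be sized to cover the validation set.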

5: Train the network:

#!/usr/bin/env sh

echo "begin to train the net!"
./build/tools/caffe train \
    --solver=examples/jb/solver.prototxt
echo "the net is finish"



This completes the whole training process; the resulting model can then be used for testing.
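
For that testing step, caffe test scores a trained snapshot against the TEST-phase data layer; the snapshot filename below is only an example of the names produced by snapshot_prefix:

./build/tools/caffe test \
    --model=examples/jb/train_val.prototxt \
    --weights=examples/jb/models/caffe_alexnet_train_iter_450000.caffemodel \
    --iterations=1000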


Reference blog: http://blog.csdn.net/hebustkyl/article/details/45534219
