Script path: experiments/scripts/faster_rcnn_end2end.sh
#!/bin/bash
# Usage:
# ./experiments/scripts/faster_rcnn_end2end.sh GPU NET DATASET [options args to {train,test}_net.py]
# DATASET is either pascal_voc or coco.
#
# Example:
# ./experiments/scripts/faster_rcnn_end2end.sh 0 VGG_CNN_M_1024 pascal_voc \
# --set EXP_DIR foobar RNG_SEED 42 TRAIN.SCALES "[400, 500, 600, 700]"
set -x   # echo each command to the terminal before it is executed
set -e   # exit the shell if any command returns a non-zero status

export PYTHONUNBUFFERED="True"   # disable Python's output buffering so log lines appear in order
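# Illustrative note (not part of the original script): with `set -e`, any
# failing command aborts the run immediately, e.g.
#   false                   # the script would exit here with status 1
#   echo "never reached"
# and `set -x` prints each command, prefixed with '+', before executing it.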
GPU_ID=$1          # read the command-line arguments: GPU id, network name, dataset, ...
NET=$2
NET_lc=${NET,,}    # ${NET,,} is the network name lowercased
DATASET=$3

array=( $@ )
len=${#array[@]}
EXTRA_ARGS=${array[@]:3:$len}        # everything after the first three arguments is forwarded to {train,test}_net.py
EXTRA_ARGS_SLUG=${EXTRA_ARGS// /_}   # replace spaces with underscores for use in the log filename
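# Worked example (hypothetical invocation, not part of the original script):
#   ./experiments/scripts/faster_rcnn_end2end.sh 0 VGG16 pascal_voc --set RNG_SEED 42
# yields
#   EXTRA_ARGS="--set RNG_SEED 42"        # array elements from index 3 onward
#   EXTRA_ARGS_SLUG="--set_RNG_SEED_42"   # ${var// /_} replaces every space with '_'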
case $DATASET in   # dispatch on the dataset type: pascal_voc, coco, or an error otherwise
  pascal_voc)
    TRAIN_IMDB="voc_2007_trainval"   # imdb names used for training and testing
    TEST_IMDB="voc_2007_test"
    PT_DIR="pascal_voc"
    ITERS=70000                      # number of training iterations
    ;;
  coco)
    # This is a very long and slow training schedule
    # You can probably use fewer iterations and reduce the
    # time to the LR drop (set in the solver to 350,000 iterations).
    TRAIN_IMDB="coco_2014_train"     # imdb names used for training and testing
    TEST_IMDB="coco_2014_minival"
    PT_DIR="coco"
    ITERS=490000                     # number of training iterations
    ;;
  *)
    echo "No dataset given"
    exit
    ;;
esac
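# Sketch (hypothetical, not in the original script) of how another branch could
# be added, assuming the voc_2012 imdbs are registered in lib/datasets/factory.py:
#   voc_2012)
#     TRAIN_IMDB="voc_2012_trainval"
#     TEST_IMDB="voc_2012_test"
#     PT_DIR="pascal_voc"
#     ITERS=70000
#     ;;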
# path of the training log; the timestamp keeps repeated runs from overwriting each other
LOG="experiments/logs/faster_rcnn_end2end_${NET}_${EXTRA_ARGS_SLUG}.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
exec &> >(tee -a "$LOG")   # mirror all stdout/stderr of the rest of the script into the log file
echo Logging output to "$LOG"
# launch training with the chosen solver, ImageNet-pretrained weights, and training imdb
time ./tools/train_net.py --gpu ${GPU_ID} \
  --solver models/${PT_DIR}/${NET}/faster_rcnn_end2end/solver.prototxt \
  --weights data/imagenet_models/${NET}.v2.caffemodel \
  --imdb ${TRAIN_IMDB} \
  --iters ${ITERS} \
  --cfg experiments/cfgs/faster_rcnn_end2end.yml \
  ${EXTRA_ARGS}
set +x
# recover the path of the final snapshot from the log: the "Wrote snapshot" line
# directly precedes "done solving", and the snapshot path is its 4th field
NET_FINAL=`grep -B 1 "done solving" ${LOG} | grep "Wrote snapshot" | awk '{print $4}'`
set -x
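# Hypothetical log excerpt (illustrative only) showing what the pipeline matches:
#   Wrote snapshot to: output/faster_rcnn_end2end/.../vgg16_faster_rcnn_iter_70000.caffemodel
#   done solving
# grep -B 1 keeps the line just before the "done solving" match, the second grep
# confirms it is a "Wrote snapshot" line, and awk '{print $4}' extracts the 4th
# whitespace-separated field, i.e. the snapshot path.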
# evaluate the final snapshot on the test imdb
time ./tools/test_net.py --gpu ${GPU_ID} \
  --def models/${PT_DIR}/${NET}/faster_rcnn_end2end/test.prototxt \
  --net ${NET_FINAL} \
  --imdb ${TEST_IMDB} \
  --cfg experiments/cfgs/faster_rcnn_end2end.yml \
  ${EXTRA_ARGS}