0.写在前面
本次操作基于这两个博客,大家可以看看:
https://blog.csdn.net/Xmo_jiao/article/details/77897109
https://blog.csdn.net/ruotianxia/article/details/78331964
1.编译matio
安装命令:sudo apt-get install libmatio-dev
2.安装wget
pip install wget
3.下载Deeplabv2并编译
git clone https://github.com/xmojiao/deeplab_v2.git
4.编译caffe
修改deeplab_v2/deeplab-public-ver2/路径下的 Makefile.config.example文件,重命名为Makefile.config
接着修改这个文件中的内容,将第四行的 “# USE_CUDNN := 1”的 # 去掉。
将 “# OPENCV_VERSION := 3” 这一行的 # 去掉 (因为我装的是opencv3)
Makefile.config:
将
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib
修改为:
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include /usr/include/hdf5/serial
LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib /usr/lib/x86_64-linux-gnu /usr/lib/x86_64-linux-gnu/hdf5/serial
Makefile:
将
LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_hl hdf5
修改为:
LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_serial_hl hdf5_serial matio
打开./include/caffe/common.cuh文件
在atomicAdd前添加宏判断
// Copyright 2014 George Papandreou
#ifndef CAFFE_COMMON_CUH_
#define CAFFE_COMMON_CUH_

#include <cuda.h>

// CUDA ships a native double-precision atomicAdd starting with compute
// capability 6.0, so a hand-rolled fallback is only needed for older
// device code (and never during host-side compilation, where
// __CUDA_ARCH__ is not defined).
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
static __inline__ __device__ double atomicAdd(double *address, double val) {
  unsigned long long int *address_as_ull = (unsigned long long int *)address;
  unsigned long long int old = *address_as_ull, assumed;
  // Adding 0.0 cannot change the stored value; skip the CAS loop entirely.
  if (val == 0.0)
    return __longlong_as_double(old);
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);  // retry if another thread updated *address
  return __longlong_as_double(old);
}
#endif  // defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600

#endif  // CAFFE_COMMON_CUH_
7.将BVLC(https://github.com/BVLC/caffe)中的下列文件copy 到相应的文件夹:
./include/caffe/util/cudnn.hpp
./include/caffe/layers/cudnn_conv_layer.hpp
./include/caffe/layers/cudnn_relu_layer.hpp
./include/caffe/layers/cudnn_sigmoid_layer.hpp
./include/caffe/layers/cudnn_tanh_layer.hpp
./src/caffe/layers/cudnn_conv_layer.cpp
./src/caffe/layers/cudnn_conv_layer.cu
./src/caffe/layers/cudnn_relu_layer.cpp
./src/caffe/layers/cudnn_relu_layer.cu
./src/caffe/layers/cudnn_sigmoid_layer.cpp
./src/caffe/layers/cudnn_sigmoid_layer.cu
./src/caffe/layers/cudnn_tanh_layer.cpp
./src/caffe/layers/cudnn_tanh_layer.cu
切换到目录 deeplab_v2/deeplab-public-ver2/,然后
make all -j16
make pycaffe -j16
5.准备数据
数据集下载
# augmented PASCAL VOC (Berkeley semantic contours / SBD release)
mkdir -p ~/DL_dataset
cd ~/DL_dataset # save datasets here; referred to below as $DATASETS
wget http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz # 1.3 GB
tar -zxvf benchmark.tgz
mv benchmark_RELEASE VOC_aug
# original PASCAL VOC 2012
wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar # 2 GB
tar -xvf VOCtrainval_11-May-2012.tar
mv VOCdevkit/VOC2012 VOC2012_orig && rm -r VOCdevkit
2.数据转换
因为pascal voc2012增强数据集的label是mat格式的文件,所以我们需要把mat格式的label转为png格式的图片.
cd ~/DL_dataset/VOC_aug/dataset
mkdir cls_png # output directory for the converted .png labels
cd ~/deeplab_v2/voc2012/
./mat2png.py ~/DL_dataset/VOC_aug/dataset/cls ~/DL_dataset/VOC_aug/dataset/cls_png # convert .mat labels to .png
pascal voc2012原始数据集的label为三通道RGB图像,但是caffe最后一层softmax loss 层只能识别一通道的label,所以此处我们需要对原始数据集的label进行降维
cd ~/DL_dataset/VOC2012_orig
mkdir SegmentationClass_1D # output directory for the single-channel labels
cd ~/deeplab_v2/voc2012
./convert_labels.py ~/DL_dataset/VOC2012_orig/SegmentationClass/ ~/DL_dataset/VOC2012_orig/ImageSets/Segmentation/trainval.txt ~/DL_dataset/VOC2012_orig/SegmentationClass_1D/
数据融合
此时我们已经处理好pascal voc2012增强数据集和pascal voc2012的原始数据集,为了便于train.txt等文件的调用,我们需要将两个文件夹的数据合并到同一个文件夹中.目前已有数据文件如下:
~/DL_dataset/VOC2012_orig 为原始pascal voc2012文件夹
images数据集的文件名为:JPEGImages
labels数据集文件名为:SegmentationClass_1D
~/DL_dataset/VOC_aug/dataset为pascal voc2012增强数据集文件夹
images数据集的文件名为:img
,jpg图片数为5073
labels数据集文件名为:cls_png,png图片数11355
现将pascal voc2012原始数据集里的images和labels分别复制到增强数据集中,若重复则覆盖,合并数据集的操作如下:
# copy the original VOC2012 labels/images into the augmented dataset, overwriting duplicates
cp ~/DL_dataset/VOC2012_orig/SegmentationClass_1D/* ~/DL_dataset/VOC_aug/dataset/cls_png
cp ~/DL_dataset/VOC2012_orig/JPEGImages/* ~/DL_dataset/VOC_aug/dataset/img/
文件名修改
对应train.txt文件的数据集文件名,修改文件名。
cd ~/DL_dataset/VOC_aug/dataset
# rename to the directory names expected by train.txt and the other list files
mv ./img ./JPEGImages
mv ./cls_png ./SegmentationClassAug
到此处, ~/DL_dataset/VOC_aug/dataset文件夹中
images数据集的文件名为:JPEGImages ,jpg图片数由5073变为17125
labels数据集文件名为:SegmentationClassAug ,png图片数由11355变为12031
6.模型训练
下载模型
以vgg16训练为例,下载:
wget http://liangchiehchen.com/projects/released/deeplab_aspp_vgg16/prototxt_and_model.zip
unzip prototxt_and_model.zip
# keep only the caffemodel weights; the repo already ships its own prototxt files
mv *caffemodel ~/deeplab_v2/voc2012/model/deeplab_largeFOV
rm *prototxt
deeplab2的script脚本文件run_pascal.sh 解析
目前我们已经准备好数据集和数据txt文件,参数文件model,网络结构文件prototxt,和三个sh脚本文件,接下来只需要修改run_pascal.sh文件,deeplabv2就可以run起来了。
在deeplab_v2/voc2012里
#!/bin/sh
## MODIFY PATH for YOUR SETTING
ROOT_DIR=~/DL_dataset # root directory holding the VOC datasets
CAFFE_DIR=../deeplab-public-ver2 # path to the official deeplab source tree
CAFFE_BIN=${CAFFE_DIR}/.build_release/tools/caffe.bin
EXP=. # experiment directory, i.e. ~/deeplab_v2/voc2012
if [ "${EXP}" = "." ]; then # voc2012: 21 classes; DATA_ROOT is the merged dataset path
NUM_LABELS=21
DATA_ROOT=${ROOT_DIR}/VOC_aug/dataset/
else
NUM_LABELS=0
echo "Wrong exp name"
fi
## Specify which model to train
########### voc12 ################
NET_ID=deeplab_largeFOV ## the value shipped in the original file is wrong; it must be deeplab_largeFOV
## Variables used for weakly or semi-supervisedly training
#TRAIN_SET_SUFFIX=
TRAIN_SET_SUFFIX=_aug # train on the train_aug.txt list
#TRAIN_SET_STRONG=train
#TRAIN_SET_STRONG=train200
#TRAIN_SET_STRONG=train500
#TRAIN_SET_STRONG=train1000
#TRAIN_SET_STRONG=train750
#TRAIN_SET_WEAK_LEN=5000
DEV_ID=0
#####
## Create dirs
CONFIG_DIR=${EXP}/config/${NET_ID} # i.e. ./config/deeplab_largeFOV
MODEL_DIR=${EXP}/model/${NET_ID}
mkdir -p ${MODEL_DIR}
LOG_DIR=${EXP}/log/${NET_ID}
mkdir -p ${LOG_DIR}
export GLOG_log_dir=${LOG_DIR}
## Run
RUN_TRAIN=1 # 1 = run the training phase
RUN_TEST=0 # 1 = run the test phase
RUN_TRAIN2=0
RUN_TEST2=0
## Training #1 (on train_aug)
if [ ${RUN_TRAIN} -eq 1 ]; then # if RUN_TRAIN is 1
#
LIST_DIR=${EXP}/list
TRAIN_SET=train${TRAIN_SET_SUFFIX}
if [ -z ${TRAIN_SET_WEAK_LEN} ]; then # true when TRAIN_SET_WEAK_LEN is empty/unset
TRAIN_SET_WEAK=${TRAIN_SET}_diff_${TRAIN_SET_STRONG}
comm -3 ${LIST_DIR}/${TRAIN_SET}.txt ${LIST_DIR}/${TRAIN_SET_STRONG}.txt > ${LIST_DIR}/${TRAIN_SET_WEAK}.txt
else
TRAIN_SET_WEAK=${TRAIN_SET}_diff_${TRAIN_SET_STRONG}_head${TRAIN_SET_WEAK_LEN}
comm -3 ${LIST_DIR}/${TRAIN_SET}.txt ${LIST_DIR}/${TRAIN_SET_STRONG}.txt | head -n ${TRAIN_SET_WEAK_LEN} > ${LIST_DIR}/${TRAIN_SET_WEAK}.txt
fi
#
MODEL=${EXP}/model/${NET_ID}/init.caffemodel # initial weights: the downloaded VGG16 (or ResNet101) model
#
echo Training net ${EXP}/${NET_ID}
for pname in train solver; do
sed "$(eval echo $(cat sub.sed))" \
${CONFIG_DIR}/${pname}.prototxt > ${CONFIG_DIR}/${pname}_${TRAIN_SET}.prototxt
done # instantiates the train/solver prototxts from their templates; the resulting command is echoed below
CMD="${CAFFE_BIN} train \
--solver=${CONFIG_DIR}/solver_${TRAIN_SET}.prototxt \
--gpu=${DEV_ID}"
if [ -f ${MODEL} ]; then
CMD="${CMD} --weights=${MODEL}"
fi
echo Running ${CMD} && ${CMD}
fi
# When training runs, the effective command is: ../deeplab-public-ver2/.build_release/tools/caffe.bin train --solver=voc12/config/deeplab_largeFOV/solver_train_aug.prototxt --gpu=0 --weights=voc12/model/deeplab_largeFOV/init.caffemodel
# In that command, solver_train_aug.prototxt is generated from solver.prototxt, and init.caffemodel is the originally downloaded VGG16 model
## Test #1 specification (on val or test)
if [ ${RUN_TEST} -eq 1 ]; then
#
for TEST_SET in val; do
TEST_ITER=`cat ${EXP}/list/${TEST_SET}.txt | wc -l` # number of test images listed in val.txt (1449 in total)
MODEL=${EXP}/model/${NET_ID}/test.caffemodel
if [ ! -f ${MODEL} ]; then
MODEL=`ls -t ${EXP}/model/${NET_ID}/train_iter_*.caffemodel | head -n 1`
fi
#
echo Testing net ${EXP}/${NET_ID}
FEATURE_DIR=${EXP}/features/${NET_ID}
mkdir -p ${FEATURE_DIR}/${TEST_SET}/fc8
mkdir -p ${FEATURE_DIR}/${TEST_SET}/fc9
mkdir -p ${FEATURE_DIR}/${TEST_SET}/seg_score
sed "$(eval echo $(cat sub.sed))" \
${CONFIG_DIR}/test.prototxt > ${CONFIG_DIR}/test_${TEST_SET}.prototxt
CMD="${CAFFE_BIN} test \
--model=${CONFIG_DIR}/test_${TEST_SET}.prototxt \
--weights=${MODEL} \
--gpu=${DEV_ID} \
--iterations=${TEST_ITER}"
echo Running ${CMD} && ${CMD}
done
fi
# When testing runs, the effective command is: ../deeplab-public-ver2/.build_release/tools/caffe.bin test --model=voc12/config/deeplab_largeFOV/test_val.prototxt --weights=voc12/model/deeplab_largeFOV/train_iter_20000.caffemodel --gpu=0 --iterations=1449
# In that command, test_val.prototxt is generated from test.prototxt, and train_iter_20000.caffemodel is the model produced by the training phase
注意这两行:
ROOT_DIR=~/DL_dataset #此处为voc数据集主路径
DATA_ROOT=${ROOT_DIR}/VOC_aug/dataset/
需要自己改成自己的路径
跑通模型
cd ~/deeplab_v2/voc2012
sh run_pascal.sh 2>&1|tee train.log # run training; tee mirrors all output into train.log
如下图所示
---------------------
作者:被月亮晒黑_
来源:CSDN
原文:https://blog.csdn.net/qq_40314507/article/details/89529845
版权声明:本文为博主原创文章,转载请附上博文链接!