# Task: multilabel classification on PASCAL VOC using a Python data layer.
# -*- coding: UTF-8 -*-
from caffe import layers as L, params as P
import caffe
import sys
import os
import numpy as np
import os.path as osp
import matplotlib.pyplot as plt
from copy import copy
# 1. Preparation: plot size, caffe paths, class labels, weights, GPU mode.
plt.rcParams['figure.figsize'] = (6, 6)

caffe_root = '/home/zjhao/caffe/'
examples_root = '/home/zjhao/caffe/examples/'

# Make pycaffe, the example data layers, and the tools package importable.
for extra_path in (caffe_root + 'python',
                   examples_root + 'pycaffe/layers',
                   examples_root + 'pycaffe'):  # the tools package lives here
    sys.path.append(extra_path)
import tools

pascal_root = '/home/zjhao/download/VOCdevkit/VOC2012/'  # PASCAL image root

# The 20 PASCAL VOC object categories, indexed by label position.
classes = np.asarray([
    'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
    'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
    'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
])

# Fetch the pre-trained CaffeNet weights if they are not present yet.
weights_file = (caffe_root +
                'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
if not os.path.isfile(weights_file):
    print('Downloading pre-trained CaffeNet model...')
    os.system(
        '%s/scripts/download_model_binary.py %s/models/bvlc_reference_caffenet'
        % (caffe_root, caffe_root))

caffe.set_mode_gpu()
caffe.set_device(0)
# 2. Define the network prototxt.
def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
    """Convolution layer followed by an in-place ReLU.

    Returns the (convolution, relu) layer pair so callers can attach
    later layers to either one.
    """
    conv_layer = L.Convolution(bottom,
                               kernel_size=ks,
                               stride=stride,
                               num_output=nout,
                               pad=pad,
                               group=group)
    relu_layer = L.ReLU(conv_layer, in_place=True)
    return conv_layer, relu_layer
def fc_relu(bottom, nout):
    """Fully connected (InnerProduct) layer with an in-place ReLU."""
    fc_layer = L.InnerProduct(bottom, num_output=nout)
    relu_layer = L.ReLU(fc_layer, in_place=True)
    return fc_layer, relu_layer
def max_pool(bottom, ks, stride=1):
    """Max-pooling layer with the given kernel size and stride."""
    pooled = L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
    return pooled
# References:
# https://blog.csdn.net/qq_24695385/article/details/80368618
# https://blog.csdn.net/qq_38906523/article/details/80032862
def caffenet_multilabel(data_layer_params, datalayer):
    """Build a CaffeNet-style net for multilabel classification.

    Data comes from a Python data layer (`datalayer` class inside the
    pascal_multilabel_datalayers module, configured via
    `data_layer_params`); the head is a 20-way score layer trained with
    sigmoid cross-entropy. Returns the network as a prototxt string.
    """
    net = caffe.NetSpec()
    # Python data layer producing (data, label) tops.
    net.data, net.label = L.Python(module='pascal_multilabel_datalayers',
                                   layer=datalayer,
                                   ntop=2,
                                   param_str=str(data_layer_params))
    # Standard CaffeNet convolutional trunk.
    net.conv1, net.relu1 = conv_relu(net.data, 11, 96, stride=4)
    net.pool1 = max_pool(net.relu1, 3, stride=2)
    net.norm1 = L.LRN(net.pool1, local_size=5, alpha=1e-4, beta=0.75)
    net.conv2, net.relu2 = conv_relu(net.norm1, 5, 256, pad=2, group=2)
    net.pool2 = max_pool(net.relu2, 3, stride=2)
    net.norm2 = L.LRN(net.pool2, local_size=5, alpha=1e-4, beta=0.75)
    net.conv3, net.relu3 = conv_relu(net.norm2, 3, 384, pad=1)
    net.conv4, net.relu4 = conv_relu(net.relu3, 3, 384, pad=1, group=2)
    net.conv5, net.relu5 = conv_relu(net.relu4, 3, 256, pad=1, group=2)
    net.pool5 = max_pool(net.relu5, 3, stride=2)
    # Classifier head: two FC+dropout stages, then per-class scores.
    net.fc6, net.relu6 = fc_relu(net.pool5, 4096)
    net.drop6 = L.Dropout(net.relu6, in_place=True)
    net.fc7, net.relu7 = fc_relu(net.drop6, 4096)
    net.drop7 = L.Dropout(net.relu7, in_place=True)
    net.score = L.InnerProduct(net.drop7, num_output=20)
    # Independent sigmoid cross-entropy per class: the multilabel loss.
    net.loss = L.SigmoidCrossEntropyLoss(net.score, net.label)
    return str(net.to_proto())
# 3. Write the net and solver definition files.
workdir = './pascal_multilabel_with_datalayer'
if not os.path.isdir(workdir):
    os.mkdir(workdir)

solverprototxt = tools.CaffeSolver(
    trainnet_prototxt_path=osp.join(workdir, "trainnet.prototxt"),
    testnet_prototxt_path=osp.join(workdir, "valnet.prototxt"))
solverprototxt.sp['display'] = '1'
solverprototxt.sp['base_lr'] = '0.0001'
solverprototxt.write(osp.join(workdir, 'solver.prototxt'))

# The train and val nets are identical except for the data split they
# read, so write both from one loop.
for split_name, proto_name in (('train', 'trainnet.prototxt'),
                               ('val', 'valnet.prototxt')):
    data_layer_params = dict(batch_size=128,
                             im_shape=[227, 227],
                             split=split_name,
                             pascal_root=pascal_root)
    with open(osp.join(workdir, proto_name), 'w') as f:
        f.write(caffenet_multilabel(data_layer_params,
                                    'PascalMultilabelDataLayerSync'))
# Load the caffe solver, initialize the train net from the pre-trained
# CaffeNet weights, and share those weights with the test net.
solver = caffe.SGDSolver(osp.join(workdir, 'solver.prototxt'))
solver.net.copy_from(
    caffe_root +
    'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
solver.test_nets[0].share_with(solver.net)
solver.step(1)

# Sanity-check the data layer: show the first image of the loaded batch
# together with its ground-truth label names.
transformer = tools.SimpleTransformer()
image_index = 0
plt.figure()
plt.imshow(
    transformer.deprocess(
        copy(solver.net.blobs['data'].data[image_index, ...])))
# Builtin int instead of np.int: the np.int alias is deprecated since
# NumPy 1.20 and removed in 1.24; the two are equivalent here.
gtlist = solver.net.blobs['label'].data[image_index, ...].astype(int)
plt.title('GT: {}'.format(classes[np.where(gtlist)]))
plt.axis('off')
# plt.show()
# 4. Train the network.
def hamming_distance(gt, est):
    """Return the fraction of positions where *gt* and *est* agree.

    Despite the name, this is per-label accuracy (1 minus the normalized
    Hamming distance). Returns 0.0 for empty inputs instead of raising
    ZeroDivisionError as the original did.
    """
    if len(gt) == 0:
        return 0.0
    # 1.0 keeps the division a float under Python 2 as well.
    return sum(1.0 for g, e in zip(gt, est) if g == e) / len(gt)
def check_accuracy(net, num_batches, batch_size=128):
    """Average per-label accuracy of *net* over *num_batches* forward passes."""
    total = 0.0
    for _ in range(num_batches):
        net.forward()
        labels = net.blobs['label'].data
        # A positive raw score means sigmoid probability > 0.5.
        predictions = net.blobs['score'].data > 0
        for truth, guess in zip(labels, predictions):
            total += hamming_distance(truth, guess)
    return total / (num_batches * batch_size)
# Run 6 rounds of 100 SGD steps, reporting val accuracy after each round.
for itt in range(6):
    solver.step(100)
    # Bug fix: check_accuracy is the module-level helper, not a method of
    # the solver (the original called solver.check_accuracy, which does
    # not exist on caffe.SGDSolver). Also use the print function for
    # consistency with the rest of the file.
    print('itt:{:3d}'.format((itt + 1) * 100), 'accuracy:{0:.4f}'.format(
        check_accuracy(solver.test_nets[0], 50)))
# 5. Visualize a few val-set predictions next to the ground truth.
test_net = solver.test_nets[0]
for image_index in range(5):
    plt.figure()
    plt.imshow(
        transformer.deprocess(
            copy(test_net.blobs['data'].data[image_index, ...])))
    # Builtin int instead of the np.int alias (deprecated in NumPy 1.20,
    # removed in 1.24); behavior is identical.
    gtlist = test_net.blobs['label'].data[image_index, ...].astype(int)
    # A positive raw score means sigmoid probability > 0.5.
    estlist = test_net.blobs['score'].data[image_index, ...] > 0
    plt.title('GT: {} \n EST: {}'.format(classes[np.where(gtlist)],
                                         classes[np.where(estlist)]))
    plt.axis('off')