This notebook demonstrates the visualization of AlexNet, the network designed by Krizhevsky.
To make it run locally, only minor modifications and explanatory notes were added; the original file comes from the corresponding notebook example on the Caffe website: http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/filter_visualization.ipynb . Before experimenting, it is recommended to run ./scripts/download_model_binary.py models/bvlc_reference_caffenet to download the caffemodel.
--- Last update: June 7, 2015
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# change the working directory to caffe-master
%cd '/home/ouxinyu/caffe-master'
# Make sure that caffe is on the python path:
caffe_root = './'  # the working directory was switched to the Caffe root above, so paths are relative to it
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
import os
if not os.path.isfile(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):
    print("Downloading pre-trained CaffeNet model...")
    !./scripts/download_model_binary.py models/bvlc_reference_caffenet
Set Caffe to CPU mode, load the net in the test phase for inference, and configure input preprocessing.
caffe.set_mode_cpu()
net = caffe.Net(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',
                caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
                caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
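The same preprocessing can be reproduced by hand as a sanity check. The following is a minimal sketch (not part of the original notebook) that redoes the channel swap, scaling, mean subtraction, and transpose with plain numpy and compares the result against transformer.preprocess; it assumes the mean file stores BGR values, as Caffe's ImageNet mean does.
# minimal sketch (illustrative only): redo the Transformer's steps manually and compare
img = caffe.io.load_image(caffe_root + 'examples/images/cat.jpg')        # HxWx3, RGB, floats in [0,1]
img = caffe.io.resize_image(img, (227, 227))                             # match the data blob's spatial size
manual = img[:, :, ::-1] * 255.0                                         # RGB -> BGR, rescale to [0,255]
manual -= np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)  # subtract BGR mean pixel
manual = manual.transpose((2, 0, 1))                                     # HxWxC -> CxHxW
auto = transformer.preprocess('data', caffe.io.load_image(caffe_root + 'examples/images/cat.jpg'))
print(np.allclose(manual, auto, atol=1e-3))                              # should print True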
Classify the image by reshaping the net for the single input then doing the forward pass.
net.blobs['data'].reshape(1,3,227,227)
net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(caffe_root + 'examples/images/cat.jpg'))
out = net.forward()
print("Predicted class is #{}.".format(out['prob'].argmax()))
The layer features and their shapes (1 is the batch size, corresponding to the single input image in this example).
[(k, v.data.shape) for k, v in net.blobs.items()]
The parameters and their shapes. The weights are net.params['name'][0] and the biases are net.params['name'][1].
[(k, v[0].data.shape) for k, v in net.params.items()]
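The shapes above can also be turned into per-layer parameter counts; the sketch below (not in the original notebook) sums weights and biases for each layer, which for CaffeNet should come to roughly 60 million parameters in total.
# count learnable parameters (weights + biases) per layer
total = 0
for name, params in net.params.items():
    count = sum(p.data.size for p in params)
    total += count
    print("{:<6s} {:>12d}".format(name, count))
print("{:<6s} {:>12d}".format("total", total))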
Helper functions for visualization
# take an array of shape (n, height, width) or (n, height, width, channels)
# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)
def vis_square(data, padsize=1, padval=0):
    # normalize the data into [0, 1] for display (copy so the caller's array is not modified)
    data = (data - data.min()) / (data.max() - data.min())

    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])

    plt.imshow(data)
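Before applying it to real filters, the helper can be sanity-checked on random data; a minimal sketch: ten random 8x8 "filters" should tile into a 4x4 grid, with the last six cells filled by the pad value.
# quick check of vis_square: 10 random 8x8 tiles -> ceil(sqrt(10)) = 4, so a 4x4 grid with 6 padded cells
vis_square(np.random.rand(10, 8, 8), padval=1)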
The input image
plt.imshow(transformer.deprocess('data', net.blobs['data'].data[0]))
The first layer filters, conv1
# the parameters are a list of [weights, biases]
filters = net.params['conv1'][0].data
# move channels last, (96, 3, 11, 11) -> (96, 11, 11, 3), so each filter is shown as an RGB image
vis_square(filters.transpose(0, 2, 3, 1))
The first layer output, conv1 (rectified responses of the filters above, first 36 only)
feat = net.blobs['conv1'].data[0, :36]
vis_square(feat, padval=1)
The second layer filters, conv2
There are 256 filters, each of which has dimension 5 x 5 x 48. We show only the first 48 filters, with each channel shown separately, so that each filter is a row.
filters = net.params['conv2'][0].data
vis_square(filters[:48].reshape(48**2, 5, 5))
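The reshape works because the conv2 weights have shape (256, 48, 5, 5): with the two-group convolution each filter sees only 48 of the 96 conv1 channels, so the first 48 filters form a (48, 48, 5, 5) array that flattens to (2304, 5, 5), and the 48x48 grid shows one filter per row with one input channel per column. The shapes can be checked directly:
# confirm the shapes behind the reshape above
print(filters.shape, filters[:48].shape, filters[:48].reshape(48**2, 5, 5).shape)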
The second layer output, conv2 (rectified, only the first 36 of 256 channels)
feat = net.blobs['conv2'].data[0, :36]
vis_square(feat, padval=1)
The third layer output, conv3 (rectified, all 384 channels)
feat = net.blobs['conv3'].data[0]
vis_square(feat, padval=0.5)
The fourth layer output, conv4 (rectified, all 384 channels)
feat = net.blobs['conv4'].data[0]
vis_square(feat, padval=0.5)
The fifth layer output, conv5 (rectified, all 256 channels)
feat = net.blobs['conv5'].data[0]
vis_square(feat, padval=0.5)
The fifth layer after pooling, pool5
feat = net.blobs['pool5'].data[0]
vis_square(feat, padval=1)
The first fully connected layer, fc6 (rectified)
We show the output values and the histogram of the positive values.
feat = net.blobs['fc6'].data[0]
plt.subplot(2, 1, 1)
plt.plot(feat.flat)
plt.subplot(2, 1, 2)
_ = plt.hist(feat.flat[feat.flat > 0], bins=100)
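The histogram only covers the positive values; a complementary check (a minimal sketch, not in the original notebook) is how many of the 4096 fc6 units the ReLU has zeroed out for this image.
# fraction of fc6 activations that are exactly zero after the ReLU
print("fc6 sparsity: {:.1%} of {} units are zero".format(np.mean(feat == 0), feat.size))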
The second fully connected layer, fc7 (rectified)
feat = net.blobs['fc7'].data[0]
plt.subplot(2, 1, 1)
plt.plot(feat.flat)
plt.subplot(2, 1, 2)
_ = plt.hist(feat.flat[feat.flat > 0], bins=100)
The final probability output, prob
feat = net.blobs['prob'].data[0]
plt.plot(feat.flat)
Let's see the top 5 predicted labels.
# load labels
imagenet_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'
try:
    labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
except:
    !data/ilsvrc12/get_ilsvrc_aux.sh
    labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
# sort top k predictions from softmax output
top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
print(labels[top_k])
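To see how confident each of the top predictions is, the same indices can be applied to the softmax output; a minimal sketch:
# print each top-5 label together with its softmax probability
prob = net.blobs['prob'].data[0]
for i in top_k:
    print("{:.4f}  {}".format(prob[i], labels[i]))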