How to Define Custom Networks and Custom Layers in Caffe with Python (Part 2)

Continuing to get familiar with the material from the previous post.

Load pretrained parameters to classify an image

First, download the model files from a GitHub gist with the helper script:

./scripts/download_model_from_gist.sh <gist_id>

#!/usr/bin/env sh
# The first positional argument from the command line
GIST=$1
# The second positional argument; defaults to ./models
DIRNAME=${2:-./models}
# -z tests whether the string length is zero; returns true if so
if [ -z $GIST ]; then
  echo "usage: download_model_from_gist.sh <gist_id> <dirname>"
  exit
fi

GIST_DIR=$(echo $GIST | tr '/' '-')
MODEL_DIR="$DIRNAME/$GIST_DIR"
# -d tests whether the path exists and is a directory; returns true if so
if [ -d $MODEL_DIR ]; then
    echo "$MODEL_DIR already exists! Please make sure you're not overwriting anything important!"
    exit
fi

echo "Downloading Caffe model info to $MODEL_DIR ..."
# mkdir -p creates the directory, including any missing parent directories
mkdir -p $MODEL_DIR
wget https://gist.github.com/$GIST/download -O $MODEL_DIR/gist.zip
unzip -j $MODEL_DIR/gist.zip -d $MODEL_DIR
rm $MODEL_DIR/gist.zip
echo "Done"
Script two: ./scripts/download_model_binary.py <dirname>

#!/usr/bin/env python
#coding=utf-8
import os
import sys
import time
import yaml
import urllib
import hashlib
import argparse
"""
argparse模块使得编写用户友好的命令行接口非常容易。程序只需定义好它要求的参数,
然后argparse将负责如何从sys.argv中解析出这些参数。argparse模块还会自动生成帮
助和使用信息并且当用户赋给程序非法的参数时产生错误信息。
"""
required_keys = ['caffemodel', 'caffemodel_url', 'sha1']


def reporthook(count, block_size, total_size):
    """
    From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
    """
    global start_time
    if count == 0:
        start_time = time.time()
        return
    duration = (time.time() - start_time) or 0.01
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = int(count * block_size * 100 / total_size)
    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                    (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()


def parse_readme_frontmatter(dirname):
    readme_filename = os.path.join(dirname, 'readme.md')
    with open(readme_filename) as f:
# line.strip() removes surrounding whitespace by default ('\n', '\r', '\t', ' ')
        lines = [line.strip() for line in f.readlines()]
# lines is a list; locate the two '---' markers that delimit the frontmatter
    top = lines.index('---')
    bottom = lines.index('---', top + 1)  # start searching after the first '---'
# The YAML frontmatter maps naturally onto Python lists and dictionaries,
# which is what yaml.load returns here
    frontmatter = yaml.load('\n'.join(lines[top + 1:bottom]))
    assert all(key in frontmatter for key in required_keys)
    return dirname, frontmatter


def valid_dirname(dirname):
    try:
        return parse_readme_frontmatter(dirname)
    except Exception as e:
        print('ERROR: {}'.format(e))
        raise argparse.ArgumentTypeError(
            'Must be valid Caffe model directory with a correct readme.md')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Download trained model binary.')
    parser.add_argument('dirname', type=valid_dirname)  # the positional argument
    args = parser.parse_args()

    # A tiny hack: the dirname validator also returns readme YAML frontmatter.
    dirname = args.dirname[0]
    frontmatter = args.dirname[1]
# this is the model file that will be downloaded
    model_filename = os.path.join(dirname, frontmatter['caffemodel'])

    # Closure-d function for checking SHA1.
# For MD5, digest() returns the raw 16-byte value while hexdigest() returns a
# 32-character hex string; for SHA-1 they give 20 bytes and 40 hex characters.
    def model_checks_out(filename=model_filename, sha1=frontmatter['sha1']):
        with open(filename, 'rb') as f:
            return hashlib.sha1(f.read()).hexdigest() == sha1
# check whether the model already exists and matches the checksum
    if os.path.exists(model_filename) and model_checks_out():
        print("Model already exists.")
        sys.exit(0)
# download the model, then verify it
    urllib.urlretrieve(
        frontmatter['caffemodel_url'], model_filename, reporthook)
    if not model_checks_out():
        print('ERROR: model did not download correctly! Run this again.')
        sys.exit(1)
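To see what this script expects, here is a minimal sketch of the readme.md frontmatter it parses. The field values below are illustrative, not the real URL or checksum:

#!/usr/bin/env python
#coding=utf-8
# Sketch: parse readme.md-style YAML frontmatter (illustrative values only)
import yaml

example_readme = """---
name: BVLC CaffeNet Model
caffemodel: bvlc_reference_caffenet.caffemodel
caffemodel_url: http://example.com/bvlc_reference_caffenet.caffemodel
sha1: 0123456789abcdef0123456789abcdef01234567
---
(the rest of the readme follows here)
"""

# same logic as parse_readme_frontmatter above
lines = [line.strip() for line in example_readme.splitlines()]
top = lines.index('---')
bottom = lines.index('---', top + 1)
frontmatter = yaml.load('\n'.join(lines[top + 1:bottom]))
print frontmatter['caffemodel'], frontmatter['sha1']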
The default download path is under models/. Use the scripts above to download the CaffeNet model and the ImageNet labels:

./scripts/download_model_binary.py models/bvlc_reference_caffenet
./data/ilsvrc12/get_ilsvrc_aux.sh
#have a look at the model
python python/draw_net.py models/bvlc_reference_caffenet/deploy.prototxt caffenet.png
open caffenet.png

First change the batch size 10 in the input layer of deploy.prototxt to 1, then run the code below (the script also reshapes the data blob to batch size 1 on the fly, which has the same effect):


#!/usr/bin/env python
#coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

caffe_root = '/home/x/git/caffe/'
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe

#caffe.set_mode_cpu()

caffe.set_device(0)
caffe.set_mode_gpu()

#load the model
net = caffe.Net('/home/x/git/caffe/models/bvlc_reference_caffenet/deploy.prototxt',
                '/home/x/git/caffe/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
                caffe.TEST)

# load input and configure preprocessing
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_mean('data', np.load('/home/x/git/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
transformer.set_transpose('data', (2,0,1))
transformer.set_channel_swap('data', (2,1,0))
transformer.set_raw_scale('data', 255.0)

#note we can change the batch size on-the-fly
#since we classify only one image, we change batch size from 10 to 1
net.blobs['data'].reshape(1,3,227,227)

#load the image in the data layer
im = caffe.io.load_image('/home/x/git/caffe/examples/images/cat.jpg')
net.blobs['data'].data[...] = transformer.preprocess('data', im)

#compute
out = net.forward()

# other possibility : out = net.forward_all(data=np.asarray([transformer.preprocess('data', im)]))

# print the predicted class
print out['prob'].argmax()
print '========================'
#print predicted labels
labels = np.loadtxt("/home/x/git/caffe/data/ilsvrc12/synset_words.txt", str, delimiter='\t')
top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
print labels[top_k]
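To also see how confident the network is, a small follow-on sketch (run in the same session as the script above) prints each top-5 label together with its probability:

# Sketch: print the top-5 labels with their softmax probabilities
probs = net.blobs['prob'].data[0].flatten()
for i in top_k:
    print '%.4f  %s' % (probs[i], labels[i])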

The official net surgery example: http://nbviewer.jupyter.org/github/BVLC/caffe/blob/master/examples/net_surgery.ipynb

import numpy as np
import matplotlib.pyplot as plt
#matplotlib inline
import pylab
#matplotlib.use('TkAgg')
#import matplotlib.rcsetup as rcsetup
#print(rcsetup.all_backends)

# Make sure that caffe is on the python path:
caffe_root = '/home/x/git/caffe/'  # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')

import caffe

# configure plotting
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Load the net, list its data and params, and filter an example image.
caffe.set_mode_cpu()
net = caffe.Net('/home/x/git/caffe/examples/net_surgery/conv.prototxt', caffe.TEST)
print("blobs {}\nparams {}".format(net.blobs.keys(), net.params.keys()))

# load image and prepare as a single input batch for Caffe
im = np.array(caffe.io.load_image('/home/x/git/caffe/examples/images/cat_gray.jpg', color=False)).squeeze()
plt.title("original image")
plt.imshow(im)
plt.axis('off')

im_input = im[np.newaxis, np.newaxis, :, :]
net.blobs['data'].reshape(*im_input.shape)
net.blobs['data'].data[...] = im_input
# helper show filter outputs
def show_filters(net):
    net.forward()
    plt.figure()
    filt_min, filt_max = net.blobs['conv'].data.min(), net.blobs['conv'].data.max()
    for i in range(3):
        plt.subplot(1,4,i+2)
        plt.title("filter #{} output".format(i))
        plt.imshow(net.blobs['conv'].data[0, i], vmin=filt_min, vmax=filt_max)
        plt.tight_layout()
        plt.axis('off')
# filter the image with initial
show_filters(net)
# pick first filter output
conv0 = net.blobs['conv'].data[0, 0]
print("pre-surgery output mean {:.2f}".format(conv0.mean()))
# set first filter bias to 1
net.params['conv'][1].data[0] = 1.
net.forward()
print("post-surgery output mean {:.2f}".format(conv0.mean()))
ksize = net.params['conv'][0].data.shape[2:]
# make Gaussian blur
sigma = 1.
y, x = np.mgrid[-ksize[0]//2 + 1:ksize[0]//2 + 1, -ksize[1]//2 + 1:ksize[1]//2 + 1]
g = np.exp(-((x**2 + y**2)/(2.0*sigma**2)))
gaussian = (g / g.sum()).astype(np.float32)
net.params['conv'][0].data[0] = gaussian
# make Sobel operator for edge detection
net.params['conv'][0].data[1:] = 0.
sobel = np.array((-1, -2, -1, 0, 0, 0, 1, 2, 1), dtype=np.float32).reshape((3,3))
net.params['conv'][0].data[1, 0, 1:-1, 1:-1] = sobel  # horizontal
net.params['conv'][0].data[2, 0, 1:-1, 1:-1] = sobel.T  # vertical
show_filters(net)
pylab.show()
Next we convert CaffeNet's fully connected layers into convolutions. This works because an InnerProduct layer's weight matrix holds exactly the same parameters as an equivalently sized convolution kernel: fc6's (4096, 9216) weights can be reinterpreted as 4096 kernels of shape (256, 6, 6), since 9216 = 256 x 6 x 6, the flattened size of the pool5 output.

#!/usr/bin/env python
#coding=utf-8
import numpy as np
import pylab
import matplotlib.pyplot as plt
from PIL import Image
caffe_root = '/home/x/git/caffe/'
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe

#caffe.set_mode_cpu()

caffe.set_device(0)
caffe.set_mode_gpu()

# Load the original network and extract the fully connected layers' parameters.
net = caffe.Net('/home/x/git/caffe/models/bvlc_reference_caffenet/deploy.prototxt',
                '/home/x/git/caffe/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
                caffe.TEST)
params = ['fc6', 'fc7', 'fc8']
# fc_params = {name: (weights, biases)}
fc_params = {pr: (net.params[pr][0].data, net.params[pr][1].data) for pr in params}
for fc in params:
    print '{} weights are {} dimensional and biases are {} dimensional'.format(fc, fc_params[fc][0].shape, fc_params[fc][1].shape)

# Load the fully convolutional network to transplant the parameters.
net_full_conv = caffe.Net('/home/x/git/caffe/examples/net_surgery/bvlc_caffenet_full_conv.prototxt', 
                          '/home/x/git/caffe/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
                          caffe.TEST)
params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-conv']
# conv_params = {name: (weights, biases)}
conv_params = {pr: (net_full_conv.params[pr][0].data, net_full_conv.params[pr][1].data) for pr in params_full_conv}

for conv in params_full_conv:
    print '{} weights are {} dimensional and biases are {} dimensional'.format(conv, conv_params[conv][0].shape, conv_params[conv][1].shape)
for pr, pr_conv in zip(params, params_full_conv):
    conv_params[pr_conv][0].flat = fc_params[pr][0].flat  # flat unrolls the arrays
    conv_params[pr_conv][1][...] = fc_params[pr][1]

net_full_conv.save('/home/x/git/caffe/examples/net_surgery/bvlc_caffenet_full_conv.caffemodel')


# load input and configure preprocessing
im = caffe.io.load_image('/home/x/git/caffe/examples/images/cat.jpg')
transformer = caffe.io.Transformer({'data': net_full_conv.blobs['data'].data.shape})
transformer.set_mean('data', np.load('/home/x/git/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
transformer.set_transpose('data', (2,0,1))
transformer.set_channel_swap('data', (2,1,0))
transformer.set_raw_scale('data', 255.0)
# make classification map by forward and print prediction indices at each location
out = net_full_conv.forward_all(data=np.asarray([transformer.preprocess('data', im)]))
print out['prob'][0].argmax(axis=0)
# show net input and confidence map (probability of the top prediction at each location)
plt.subplot(1, 2, 1)
plt.imshow(transformer.deprocess('data', net_full_conv.blobs['data'].data[0]))
plt.subplot(1, 2, 2)
plt.imshow(out['prob'][0,281])
pylab.show()
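Why is the plain .flat copy enough? Because the fc weights and the conv kernels hold the same numbers in different shapes. A quick sanity-check sketch, run in the same session as the script above (it reuses fc_params and conv_params from that session):

# Sketch: verify that fc6 and fc6-conv hold the same number of parameters
import numpy as np
print fc_params['fc6'][0].shape         # (4096, 9216)
print conv_params['fc6-conv'][0].shape  # (4096, 256, 6, 6)
assert np.prod(fc_params['fc6'][0].shape) == np.prod(conv_params['fc6-conv'][0].shape)
# 9216 == 256 * 6 * 6: each fc6 unit acts as one 6x6 convolution
# over the 256-channel pool5 output.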

While we're at it, let's also run the feature visualization code:

#!/usr/bin/env python
#coding=utf-8
import numpy as np
import pylab
import matplotlib.pyplot as plt
from PIL import Image
caffe_root = '/home/x/git/caffe/'
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe

#caffe.set_mode_cpu()

caffe.set_device(0)
caffe.set_mode_gpu()
# If you get "No module named _caffe", either you have not built pycaffe or you have the wrong path.
import os
if os.path.isfile(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):
    print 'CaffeNet found.'
else:
    print 'Downloading pre-trained CaffeNet model...'
    # in the notebook this step runs: ../scripts/download_model_binary.py ../models/bvlc_reference_caffenet

model_def = caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt'
model_weights = caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'

net = caffe.Net(model_def,      # defines the structure of the model
                model_weights,  # contains the trained weights
                caffe.TEST)     # use test mode (e.g., don't perform dropout)
# load the mean ImageNet image (as distributed with Caffe) for subtraction
mu = np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
mu = mu.mean(1).mean(1)  # average over pixels to obtain the mean (BGR) pixel values
print 'mean-subtracted values:', zip('BGR', mu)

# create transformer for the input called 'data'
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})

transformer.set_transpose('data', (2,0,1))  # move image channels to outermost dimension
transformer.set_mean('data', mu)            # subtract the dataset-mean value in each channel
transformer.set_raw_scale('data', 255)      # rescale from [0, 1] to [0, 255]
transformer.set_channel_swap('data', (2,1,0))  # swap channels from RGB to BGR
# set the size of the input (we can skip this if we're happy
#  with the default; we can also change it later, e.g., for different batch sizes)
net.blobs['data'].reshape(50,        # batch size
                          3,         # 3-channel (BGR) images
                          227, 227)  # image size is 227x227
image = caffe.io.load_image(caffe_root + 'examples/images/cat.jpg')
transformed_image = transformer.preprocess('data', image)
plt.imshow(image)
# copy the image data into the memory allocated for the net
net.blobs['data'].data[...] = transformed_image

### perform classification
output = net.forward()

output_prob = output['prob'][0]  # the output probability vector for the first image in the batch

print 'predicted class is:', output_prob.argmax()


# load ImageNet labels
labels_file = caffe_root + 'data/ilsvrc12/synset_words.txt'
#if not os.path.exists(labels_file):
#    !../data/ilsvrc12/get_ilsvrc_aux.sh
    
labels = np.loadtxt(labels_file, str, delimiter='\t')

print 'output label:', labels[output_prob.argmax()]

# sort top five predictions from softmax output
top_inds = output_prob.argsort()[::-1][:5]  # reverse sort and take five largest items

print 'probabilities and labels:'
# in the notebook the bare expression displays itself; in a script it must be printed
print zip(output_prob[top_inds], labels[top_inds])
# for each layer, show the output shape
for layer_name, blob in net.blobs.iteritems():
    print layer_name + '\t' + str(blob.data.shape)
for layer_name, param in net.params.iteritems():
    print layer_name + '\t' + str(param[0].data.shape), str(param[1].data.shape)
def vis_square(data):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
       and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""
    
    # normalize data for display
    data = (data - data.min()) / (data.max() - data.min())
    
    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n ** 2 - data.shape[0]),
               (0, 1), (0, 1))                 # add some space between filters
               + ((0, 0),) * (data.ndim - 3))  # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant', constant_values=1)  # pad with ones (white)
    
    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data); plt.axis('off')
    pylab.show()
# the parameters are a list of [weights, biases]
filters = net.params['conv1'][0].data
vis_square(filters.transpose(0, 2, 3, 1))  # (96, 3, 11, 11) -> (96, 11, 11, 3) so each filter renders as an RGB tile

feat = net.blobs['conv1'].data[0, :36]
vis_square(feat)

feat = net.blobs['pool5'].data[0]
vis_square(feat)
feat = net.blobs['fc6'].data[0]
plt.subplot(2, 1, 1)
plt.plot(feat.flat)
plt.subplot(2, 1, 2)
_ = plt.hist(feat.flat[feat.flat > 0], bins=100)
feat = net.blobs['prob'].data[0]
plt.figure(figsize=(15, 3))
plt.plot(feat.flat)
pylab.show()

(Figure: the first-layer filters, conv1)

(Figure: the first-layer outputs, conv1)

(Figure: the fifth layer after pooling, pool5)

(Figure: the first fully connected layer, fc6)

(Figure: the final probability output, prob)

A quick digression:

The diff command:

Function: compare two files and report the differences, using the normal diff format by default.

diff compares two files (or sets of files), records the differences, and writes them out as a diff file, commonly known as a patch; the patch command can then apply it to the corresponding files. (The name is short for "differential".)

Syntax: diff [options] FILES

FILES can take the following forms:

  • FILE1 FILE2: both source and target are files (they must be text files); they are compared line by line.
  • DIR1 DIR2: both are directories. diff compares text files with the same name in both directories, in alphabetical order; it lists differing binary files, common subdirectories, and files that appear in only one directory.
  • FILE DIR: the source is a file, the target a directory; diff compares the file against the file of the same name in the directory.
  • DIR FILE: the source is a directory, the target a file (not a directory); the file of the same name in the source directory is compared against the target file.

FILE can be "-", meaning text read from standard input; a DIR cannot be "-".
At most one FILE may be standard input.

 

Commonly used options (short option, long option, meaning):

-i   --ignore-case               ignore case differences in file contents
     --ignore-file-name-case     ignore case when comparing file names
     --no-ignore-file-name-case  consider case when comparing file names
-E   --ignore-tab-expansion      ignore differences caused by tab expansion
-b   --ignore-space-change       ignore differences in the amount of white space
-w   --ignore-all-space          ignore all white space
-B   --ignore-blank-lines        ignore differences caused by blank lines
-I   --ignore-matching-lines=RE  ignore differences on lines matching the regular expression RE
-a   --text                      treat all files as text
     --strip-trailing-cr         strip trailing carriage returns on input
-c, -C NUM  --context[=NUM]      output NUM (default 3) lines of copied context
-u, -U NUM  --unified[=NUM]      output NUM (default 3) lines of unified context
     --label LABEL               use LABEL instead of the file name
-p   --show-c-function           show which C function each change is in
-F RE  --show-function-line=RE   show the most recent line matching RE
-q   --brief                     report only whether the files differ
-e   --ed                        output an ed script
     --normal                    output a normal diff
-n   --rcs                       output an RCS-format diff
-y   --side-by-side              output in two columns
-W NUM  --width=NUM              output at most NUM (default 130) characters per line
     --left-column               output only the left column of common lines
     --suppress-common-lines     do not output common lines
-D NAME  --ifdef=NAME            output a merged file with '#ifdef NAME' markers
     --GTYPE-group-format=GFMT   similar, but format GTYPE input groups with GFMT
                                 (GTYPE is an LTYPE choice or 'changed')
     --line-format=LFMT          similar, but format all input lines with LFMT
     --LTYPE-line-format=LFMT    similar, but format LTYPE input lines with LFMT
                                 (LTYPE is 'old', 'new', or 'unchanged')
-l   --paginate                  paginate the output through 'pr'
-t   --expand-tabs               expand tabs to spaces in the output
-T   --initial-tab               prepend a tab so tab characters line up
     --tabsize=NUM               tab stops every NUM (default 8) print columns
     --suppress-blank-empty      suppress space or tab before empty output lines
-r   --recursive                 recursively compare subdirectories
-N   --new-file                  treat absent files as empty
     --unidirectional-new-file   treat the first file as empty if it is absent
-s   --report-identical-files    report when two files are identical
-x PAT  --exclude=PAT            exclude files matching PAT
-X FILE  --exclude-from=FILE     exclude files matching any pattern listed in FILE
-S FILE  --starting-file=FILE    when comparing directories, start with FILE
     --from-file=FILE1           compare FILE1 (may be a directory) against all operands
     --to-file=FILE2             compare all operands against FILE2 (may be a directory)
     --horizon-lines=NUM         keep NUM lines of the common prefix and suffix
-d   --minimal                   try hard to find a minimal set of changes
     --speed-large-files         assume large files with many scattered small changes

Default diff output format:
n1 a n3,n4    after line n1 of the first file, add lines n3..n4 of the second file
n1,n2 d n3    lines n1..n2 of the first file are deleted (they would follow line n3 of the second file)
n1,n2 c n3,n4 lines n1..n2 of the first file are replaced by lines n3..n4 of the second file
The letter a stands for add, c for change, and d for delete.
Line numbers before the letter refer to the first file; those after it refer to the second.
Lines beginning with "<" come from the first file; lines beginning with ">" come from the second.

Comparing files

log2014.log contains (one entry per line): 2013-01 2013-02 2014-03 2013-04 2013-05 2013-06 2013-07 2013-07 2013-09 2013-10 2013-11 2013-12

log2013.log contains (one entry per line): 2013-01 2013-02 2013-03 2013-04 2013-05 2013-06 2013-07 2013-08 2013-09 2013-10

Comparing the two files:

[root@localhost test3]# diff log2014.log log2013.log
3c3
< 2014-03
---
> 2013-03
8c8
< 2013-07
---
> 2013-08
11,12d10
< 2013-11
< 2013-12

"3c3" and "8c8" mean that log2014.log and log2013.log differ on lines 3 and 8; "11,12d10" means the first file has two lines (11 and 12) that the second file lacks. Lines beginning with "<" come from the first file, lines beginning with ">" from the second.

Side-by-side output:

[root@localhost test3]# diff log2014.log log2013.log -y -W 50
2013-01              2013-01
2013-02              2013-02
2014-03            | 2013-03
2013-04              2013-04
2013-05              2013-05
2013-06              2013-06
2013-07              2013-07
2013-07            | 2013-08
2013-09              2013-09
2013-10              2013-10
2013-11            <
2013-12            <
[root@localhost test3]# diff log2013.log log2014.log -y -W 50
2013-01              2013-01
2013-02              2013-02
2013-03            | 2014-03
2013-04              2013-04
2013-05              2013-05
2013-06              2013-06
2013-07              2013-07
2013-08            | 2013-07
2013-09              2013-09
2013-10              2013-10
                   > 2013-11
                   > 2013-12

Notes:
"|" marks a line that differs between the two files
"<" marks a line that the second file is missing
">" marks a line that the first file is missing

Context format:

[root@localhost test3]# diff log2013.log log2014.log -c
*** log2013.log 2012-12-07 16:36:26.000000000 +0800
--- log2014.log 2012-12-07 18:01:54.000000000 +0800
***************
*** 1,10 ****
  2013-01
  2013-02
! 2013-03
  2013-04
  2013-05
  2013-06
  2013-07
! 2013-08
  2013-09
  2013-10
--- 1,12 ----
  2013-01
  2013-02
! 2014-03
  2013-04
  2013-05
  2013-06
  2013-07
! 2013-07
  2013-09
  2013-10
+ 2013-11
+ 2013-12
[root@localhost test3]# diff log2014.log log2013.log -c
*** log2014.log 2012-12-07 18:01:54.000000000 +0800
--- log2013.log 2012-12-07 16:36:26.000000000 +0800
***************
*** 1,12 ****
  2013-01
  2013-02
! 2014-03
  2013-04
  2013-05
  2013-06
  2013-07
! 2013-07
  2013-09
  2013-10
- 2013-11
- 2013-12
--- 1,10 ----
  2013-01
  2013-02
! 2013-03
  2013-04
  2013-05
  2013-06
  2013-07
! 2013-08
  2013-09
  2013-10
[root@localhost test3]#

The first two lines identify the files being compared. Three marker characters are used:
"+"  the second file has this line and the first does not
"-"  the first file has this line and the second does not
"!"  the line differs between the two files

Unified format:

[root@localhost test3]# diff log2014.log log2013.log -u
--- log2014.log 2012-12-07 18:01:54.000000000 +0800
+++ log2013.log 2012-12-07 16:36:26.000000000 +0800
@@ -1,12 +1,10 @@
 2013-01
 2013-02
-2014-03
+2013-03
 2013-04
 2013-05
 2013-06
 2013-07
-2013-07
+2013-08
 2013-09
 2013-10
-2013-11
-2013-12

The first part is the header:
--- log2014.log 2012-12-07 18:01:54.000000000 +0800
+++ log2013.log 2012-12-07 16:36:26.000000000 +0800
"---" marks the first (old) file and "+++" the second (new) file.
The second part is the hunk header, delimited by a pair of @@:
@@ -1,12 +1,10 @@
"-1,12" has three parts: the minus sign refers to the first file (log2014.log), "1" is the starting line, and "12" is the number of consecutive lines, i.e. 12 consecutive lines of the first file starting at line 1. Likewise, "+1,10" means 10 consecutive lines of the second file starting at line 1.
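Incidentally, Python's standard difflib module can produce the same unified format programmatically; a minimal sketch (the file names and contents are illustrative):

#!/usr/bin/env python
#coding=utf-8
# Sketch: generate a unified diff with difflib (illustrative contents)
import sys
import difflib

log2013 = ['2013-01\n', '2013-02\n', '2013-03\n', '2013-04\n']
log2014 = ['2013-01\n', '2013-02\n', '2014-03\n', '2013-04\n']

# emits "---"/"+++" headers and "@@ ... @@" hunks, just like diff -u
sys.stdout.writelines(difflib.unified_diff(
    log2013, log2014, fromfile='log2013.log', tofile='log2014.log'))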

 

Comparing directories

[root@localhost test]# diff test3 test6 
Only in test6: linklog.log 
Only in test6: log2012.log 
 
[root@localhost test]# diff test3/log2013.log test6/log2013.log 
1,10c1,3 
< 2013-01 
< 2013-02 
< 2013-03 
< 2013-04 
< 2013-05 
< 2013-06 
< 2013-07 
< 2013-08 
< 2013-09 
< 2013-10 
--- 
> hostnamebaidu=baidu.com 
> hostnamesina=sina.com 
> hostnames=true 
 
[root@localhost test]# diff test3/log2014.log test6/log2014.log 
1,12d0 
< 2013-01 
< 2013-02 
< 2014-03 
< 2013-04 
< 2013-05 
< 2013-06 
< 2013-07 
< 2013-07 
< 2013-09 
< 2013-10 
< 2013-11 
< 2013-12 
Only in test6: log2015.log 
Only in test6: log2016.log 
Only in test6: log2017.log 
[root@localhost test]# 

 

Comparing two files and generating a patch

[root@localhost test3]# diff -ruN log2013.log log2014.log >patch.log 
[root@localhost test3]# ll 
总计 12 
-rw-r--r-- 2 root root  80 12-07 16:36 log2013.log 
-rw-r--r-- 1 root root  96 12-07 18:01 log2014.log 
-rw-r--r-- 1 root root 248 12-07 21:33 patch.log 
 
[root@localhost test3]# cat patch.log 
--- log2013.log 2012-12-07 16:36:26.000000000 +0800 
+++ log2014.log 2012-12-07 18:01:54.000000000 +0800 
@@ -1,10 +1,12 @@ 
 2013-01 
 2013-02 
-2013-03 
+2014-03 
 2013-04 
 2013-05 
 2013-06 
 2013-07 
-2013-08 
+2013-07 
 2013-09 
 2013-10 
+2013-11 
+2013-12 
[root@localhost test3]#

 

Applying the patch

[root@localhost test3]# cat log2013.log 
2013-01 
2013-02 
2013-03 
2013-04 
2013-05 
2013-06 
2013-07 
2013-08 
2013-09 
2013-10 
 
[root@localhost test3]# patch log2013.log patch.log   # apply the patch.log generated above
patching file log2013.log 
 
[root@localhost test3]# cat log2013.log 
2013-01 
2013-02 
2014-03 
2013-04 
2013-05 
2013-06 
2013-07 
2013-07 
2013-09 
2013-10 
2013-11 
2013-12 
[root@localhost test3]# 

 

Example

Example: consider the following two source files. (The header names of the #include lines were lost in the original post and are left as-is; the blank line after the includes matters for the line numbers in the diffs below.)

Listing 1: hello.c
#include

int main(void)
{
    char msg[] = "Hello world!";

    puts(msg);
    printf("Welcome to use diff command.\n");

    return 0;
}

Listing 2: hello_diff.c
#include
#include

int main(void)
{
    char msg[] = "Hello world, from hello_diff.c";

    puts(msg);
    printf("hello_diff.c says, 'Here you are, using diff.'\n");

    return 0;
}
 
 

We can use diff to inspect the differences between these two files in several convenient formats:

1. Normal format output:

[root@localhost diff]# diff hello.c hello_diff.c
1a2
> #include
5c6
<     char msg[] = "Hello world!";
---
>     char msg[] = "Hello world, from hello_diff.c";
8c9
<     printf("Welcome to use diff command.\n");
---
>     printf("hello_diff.c says, 'Here you are, using diff.'\n");
[root@localhost diff]#

"1a2" means the second file, hello_diff.c, has one extra line (its line 2) relative to the first file, hello.c; "5c6" means line 5 of the first file differs from line 6 of the second.

2. Side-by-side output (the -W option sets the output width; here 130 columns):

[root@localhost diff]# diff hello.c hello_diff.c -y -W 130
#include                                                     #include
                                                           > #include

int main(void)                                               int main(void)
{                                                            {
    char msg[] = "Hello world!";                           |     char msg[] = "Hello world, from hello_diff.c";

    puts(msg);                                                   puts(msg);
    printf("Welcome to use diff command.\n");              |     printf("hello_diff.c says, 'Here you are, using diff.'

    return 0;                                                    return 0;
}                                                            }
[root@localhost diff]#

This side-by-side comparison makes it easy to spot the differing lines at a glance.

3. Context format:

[root@localhost diff]# diff hello.c hello_diff.c -c
*** hello.c     2007-09-25 17:54:51.000000000 +0800
--- hello_diff.c        2007-09-25 17:56:00.000000000 +0800
***************
*** 1,11 ****
  #include

  int main(void)
  {
!     char msg[] = "Hello world!";

      puts(msg);
!     printf("Welcome to use diff command.\n");

      return 0;
  }
--- 1,12 ----
  #include
+ #include

  int main(void)
  {
!     char msg[] = "Hello world, from hello_diff.c";

      puts(msg);
!     printf("hello_diff.c says, 'Here you are, using diff.'\n");

      return 0;
  }
[root@localhost diff]#

The first two lines identify the files being compared. Three marker characters are used:
+  the second file has this line and the first does not
-  the first file has this line and the second does not
!  the line differs between the two files

4. Unified format:

[root@localhost diff]# diff hello.c hello_diff.c -u
--- hello.c     2007-09-25 17:54:51.000000000 +0800
+++ hello_diff.c        2007-09-25 17:56:00.000000000 +0800
@@ -1,11 +1,12 @@
 #include
+#include

 int main(void)
 {
-    char msg[] = "Hello world!";
+    char msg[] = "Hello world, from hello_diff.c";

     puts(msg);
-    printf("Welcome to use diff command.\n");
+    printf("hello_diff.c says, 'Here you are, using diff.'\n");

     return 0;
 }
[root@localhost diff]#

5. Other options:

If you only want to know whether two files differ, without seeing the differences, add -q:

[root@localhost diff]# diff hello.c hello_diff.c -q
Files hello.c and hello_diff.c differ
[root@localhost diff]#

You can also ignore lines matching a regular expression with -I. Here "-I include" ignores lines containing "include":

[root@localhost diff]# diff hello.c hello_diff.c -c -I include
*** hello.c     2007-09-25 17:54:51.000000000 +0800
--- hello_diff.c        2007-09-25 17:56:00.000000000 +0800
***************
*** 2,11 ****

  int main(void)
  {
!     char msg[] = "Hello world!";

      puts(msg);
!     printf("Welcome to use diff command.\n");

      return 0;
  }
--- 3,12 ----

  int main(void)
  {
!     char msg[] = "Hello world, from hello_diff.c";

      puts(msg);
!     printf("hello_diff.c says, 'Here you are, using diff.'\n");

      return 0;
  }
[root@localhost diff]#
Below I run the same kind of diff on the two network definition files used above; the output shows exactly which layers changed from InnerProduct to Convolution:

caffePath=/home/x/git/caffe
example=$caffePath/examples/net_surgery
model=$caffePath/models
test_lb=$example/../test_layer_lb/test_python
diff $example/bvlc_caffenet_full_conv.prototxt $model/bvlc_reference_caffenet/deploy.prototxt 2>&1 | tee $test_lb/log.txt
The resulting log.txt:
1,2c1
< # Fully convolutional network version of CaffeNet.
< name: "CaffeNetConv"
---
> name: "CaffeNet"
7,11c6
<   input_param {
<     # initial shape for a fully convolutional network:
<     # the shape can be set for each input by reshape.
<     shape: { dim: 1 dim: 3 dim: 451 dim: 451 }
<   }
---
>   input_param { shape: { dim: 1 dim: 3 dim: 227 dim: 227 } }
157,158c152,153
<   name: "fc6-conv"
<   type: "Convolution"
---
>   name: "fc6"
>   type: "InnerProduct"
160,161c155,156
<   top: "fc6-conv"
<   convolution_param {
---
>   top: "fc6"
>   inner_product_param {
163d157
<     kernel_size: 6
169,170c163,164
<   bottom: "fc6-conv"
<   top: "fc6-conv"
---
>   bottom: "fc6"
>   top: "fc6"
175,176c169,170
<   bottom: "fc6-conv"
<   top: "fc6-conv"
---
>   bottom: "fc6"
>   top: "fc6"
182,186c176,180
<   name: "fc7-conv"
<   type: "Convolution"
<   bottom: "fc6-conv"
<   top: "fc7-conv"
<   convolution_param {
---
>   name: "fc7"
>   type: "InnerProduct"
>   bottom: "fc6"
>   top: "fc7"
>   inner_product_param {
188d181
<     kernel_size: 1
194,195c187,188
<   bottom: "fc7-conv"
<   top: "fc7-conv"
---
>   bottom: "fc7"
>   top: "fc7"
200,201c193,194
<   bottom: "fc7-conv"
<   top: "fc7-conv"
---
>   bottom: "fc7"
>   top: "fc7"
207,211c200,204
<   name: "fc8-conv"
<   type: "Convolution"
<   bottom: "fc7-conv"
<   top: "fc8-conv"
<   convolution_param {
---
>   name: "fc8"
>   type: "InnerProduct"
>   bottom: "fc7"
>   top: "fc8"
>   inner_product_param {
213d205
<     kernel_size: 1
219c211
<   bottom: "fc8-conv"
---
>   bottom: "fc8"
