theano-xnor-net code annotations (5): cifar10_test.py

import sys, os, time
import argparse
import lasagne
import numpy as np
import theano
import theano.tensor as T
import cPickle
import inf_layers
from fxp_helper import convert_fxp_format, fixed_point
this_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(this_dir, '..', 'train'))
import cnn_utils
import gzip
from collections import OrderedDict
import xnornet_layers
from inf_utils import set_network_params


def parse_args():
    """Argument parser for this script
    """
    # Use the argparse module to declare command-line options. '--XX' is the option flag and dest names the
    # attribute it is stored under: with dest='a' the value is read back as args.a.
    # type converts the raw string to the given type, default supplies a fallback, choices restricts the
    # value to the listed options, and help documents the option.
    parser = argparse.ArgumentParser(description='Test CIFAR-10 classification performance using XNOR-Net')
    parser.add_argument('--model', dest='model_file', help='XNOR-Net trained model file in .npz format')
    parser.add_argument('--no', dest='no_imgs', type=int, help='Number of images to test. Max = 10000')
    parser.add_argument('--mode', dest='mode', default='float', choices=['fixed', 'float'],
        help='Arithmetic mode, default = float')

    # parse command line args
    # sys.argv holds the command-line tokens; sys.argv[0] is the script path, so real arguments start at
    # sys.argv[1]. The check below requires at least 5 tokens: besides --mode, which has a default and may
    # be omitted, we need 'cifar10_test.py', '--model', its path, '--no', and its image count.
    if(len(sys.argv) < 5):
        parser.print_help()
        sys.exit()
    # parse_args() turns the options declared above into an object args, so a value declared with
    # dest='a' is accessed as args.a; return that object.
    args = parser.parse_args()
    return args
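
# Example invocation (file names here are hypothetical; --mode defaults to
# 'float' and may be omitted):
#   python cifar10_test.py --model ./cifar10_xnor_model.npz --no 1000 --mode fixed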

def construct_cifar10_testnet(input_var, data_format='float'):
    # The four parameters below only take effect in this project's own fixed-point mode
    # (data_format='fixed'): they compress stored values down to the given data_bits width and keep
    # the data symmetric about the origin, which speeds convergence.
    data_bits = 15
    conv_int_bits = 8
    norm_int_bits = 3
    fc_int_bits = 10
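
    # Worked example of the bit split (an assumption about the convention — the
    # authoritative definition lives in fxp_helper.fixed_point): data_bits=15
    # magnitude bits with conv_int_bits=8 integer bits leaves 15-8 = 7 fractional
    # bits, i.e. conv values span roughly (-256, +256) with a resolution of
    # 2**-7 = 0.0078125.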

    print('Constructing the network...')
    ##################################################
    #
    # Structure:
    #
    # Input layer
    #   |
    # Conv layer 1 + BatchNorm
    #   |
    # Conv layer 2 (xnor) + max-pooling + BatchNorm
    #   |
    # Conv layer 3 (xnor) + BatchNorm
    #   |
    # Conv layer 4 (xnor) + max-pooling + BatchNorm
    #   |
    # Conv layer 5 (xnor) + BatchNorm
    #   |
    # Conv layer 6 (xnor) + max-pooling + BatchNorm
    #   |
    # FC layer 1 (xnor) + BatchNorm
    #   |
    # FC layer 2 (xnor) + BatchNorm
    #   |
    # FC layer 3
    #   |
    # Output
    #
    ##################################################

    # Input layer: expects data of shape (None, 3, 32, 32); each image is 3*32*32,
    # while None means the test batch size is left unspecified.
    cnn = lasagne.layers.InputLayer(shape=(None, 3, 32, 32), input_var=input_var)

    # The input conv layer is not binary. As the paper states, the computational savings
    # are small when the conv layer has few input channels.
    # First conv layer: since the speedup depends on the input depth and kernel size, and the input
    # has only 3 (RGB) channels, a regular conv is used instead of xnor-conv. It takes the InputLayer
    # instance cnn from above; format is one of ['float', 'fixed'], filter_size and num_filters are the
    # kernel parameters, pad is the padding mode, and nonlinearity selects the activation function —
    # identity here, i.e. no activation.
    cnn = inf_layers.Conv2DLayer(
        cnn,
        format=data_format,
        data_bits=data_bits,
        int_bits=conv_int_bits,
        num_filters=128, 
        filter_size=(3, 3),
        pad=1,
        nonlinearity=lasagne.nonlinearities.identity)
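    # With pad=1 and a 3x3 kernel the spatial size is preserved, so (assuming
    # inf_layers.Conv2DLayer follows lasagne's shape semantics) the output shape
    # here is (None, 128, 32, 32); each MaxPool2DLayer below then halves the
    # spatial dimensions.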
    # BatchNorm layer
    cnn = inf_layers.BatchNormLayer(
        cnn,
        format=data_format,
        data_bits=data_bits,
        int_bits=norm_int_bits)
    # Binarized (xnor) conv layer
    cnn = xnornet_layers.Conv2DLayer(
            cnn,
            format=data_format,
            data_bits=data_bits,
            int_bits=conv_int_bits,
            num_filters=128, 
            filter_size=(3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.identity)

    # Max-pooling (downsampling) layer
    cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
    # BatchNorm layer
    cnn = inf_layers.BatchNormLayer(
        cnn,
        format=data_format,
        data_bits=data_bits,
        int_bits=norm_int_bits)
    # Binarized (xnor) conv layer
    cnn = xnornet_layers.Conv2DLayer(
            cnn, 
            format=data_format,
            data_bits=data_bits,
            int_bits=conv_int_bits,
            num_filters=256, 
            filter_size=(3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.identity)
    # BatchNorm layer
    cnn = inf_layers.BatchNormLayer(
        cnn,
        format=data_format,
        data_bits=data_bits,
        int_bits=norm_int_bits)
    # Binarized (xnor) conv layer
    cnn = xnornet_layers.Conv2DLayer(
            cnn, 
            format=data_format,
            data_bits=data_bits,
            int_bits=conv_int_bits,
            num_filters=256, 
            filter_size=(3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.identity)

    # Max-pooling (downsampling) layer
    cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
    # BatchNorm layer
    cnn = inf_layers.BatchNormLayer(
        cnn,
        format=data_format,
        data_bits=data_bits,
        int_bits=norm_int_bits)
    # Binarized (xnor) conv layer
    cnn = xnornet_layers.Conv2DLayer(
            cnn, 
            format=data_format,
            data_bits=data_bits,
            int_bits=conv_int_bits,
            num_filters=512, 
            filter_size=(3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.identity)
    # BatchNorm layer
    cnn = inf_layers.BatchNormLayer(
        cnn,
        format=data_format,
        data_bits=data_bits,
        int_bits=norm_int_bits)
    # Binarized (xnor) conv layer
    cnn = xnornet_layers.Conv2DLayer(
            cnn, 
            format=data_format,
            data_bits=data_bits,
            int_bits=conv_int_bits,
            num_filters=512, 
            filter_size=(3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.identity)
    # Max-pooling (downsampling) layer
    cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
    # BatchNorm layer
    cnn = inf_layers.BatchNormLayer(
        cnn,
        format=data_format,
        data_bits=data_bits,
        int_bits=norm_int_bits)
    # Binarized (xnor) fully connected layer
    cnn = xnornet_layers.DenseLayer(
            cnn, 
            format=data_format,
            data_bits=data_bits,
            int_bits=fc_int_bits,
            nonlinearity=lasagne.nonlinearities.identity,
            num_units=1024)
    # BatchNorm layer
    cnn = inf_layers.BatchNormLayer(
        cnn,
        format=data_format,
        data_bits=data_bits,
        int_bits=norm_int_bits)
    # Binarized (xnor) fully connected layer
    cnn = xnornet_layers.DenseLayer(
            cnn,
            format=data_format,
            data_bits=data_bits,
            int_bits=fc_int_bits,
            nonlinearity=lasagne.nonlinearities.identity,
            num_units=1024)
    # BatchNorm layer
    cnn = inf_layers.BatchNormLayer(
        cnn,
        format=data_format,
        data_bits=data_bits,
        int_bits=norm_int_bits)
    # Regular FC layer. Per the paper, the speedup S = 64*c*Nw/(c*Nw+64) depends only on the input
    # depth and kernel size; the last layer is effectively a 1-wide kernel, so binarization would buy
    # little here and it is left full precision (a numeric sketch follows this function).
    cnn = inf_layers.DenseLayer(
            cnn,
            format=data_format,
            data_bits=data_bits,
            int_bits=fc_int_bits, 
            nonlinearity=lasagne.nonlinearities.softmax,
            num_units=10)

    return cnn
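
# Numeric sketch of the speedup estimate quoted above, S = 64*c*Nw/(c*Nw + 64),
# where c is the input channel count and Nw the kernel height*width (not part of
# the original script, just the formula evaluated):
#   first conv (c=3,   Nw=9): S = 64*27/(27+64)     ~ 19.0x, but with only 27
#       multiply-accumulates per output the absolute work saved is tiny;
#   xnor conv (c=128,  Nw=9): S = 64*1152/(1152+64) ~ 60.6x, close to the 64x
#       asymptotic limit, which is why the middle layers are binarized.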

def test_cifar(model, no_imgs, arith_format):
    # model is the trained model file, no_imgs the number of images to test,
    # arith_format either 'fixed' or 'float'

    # input data, target and learning rate as theano symbolic var
    # theano.tensor.tensor4 is a 4-D tensor; theano.tensor.fmatrix is a float32 matrix
    input_vars = T.tensor4('input')
    targets = T.fmatrix('target')

    # construct deep network
    print('Constructing the network...')
    net = construct_cifar10_testnet(input_vars, arith_format)

    # Load data
    print('Loading the data...')
    train_x, val_x, test_x, train_y, val_y, test_y = cnn_utils.load_data('cifar10')
    # If more test images are requested than the 10000 available, cap at the available count
    if(no_imgs > len(test_x)):
        print('Max available test images = {:d}'.format(len(test_x)))
        print('Testing with max number of available test images')
        no_imgs = len(test_x)

    # test prediction and loss expressions
    print('Creating test prediction, loss and error expressions...')
    # Compute the network output, compare the no_imgs predictions test_pred against the ground
    # truth, and average the mismatches to obtain the error rate
    test_pred = lasagne.layers.get_output(net, deterministic=True)
    test_err = T.mean(T.neq(T.argmax(test_pred, axis=1), T.argmax(targets, axis=1)),dtype=theano.config.floatX)
    # theano's calling convention: theano.function([inputs], outputs)
    test_fn = theano.function([input_vars, targets], test_err)
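    # A minimal theano.function sketch (illustrative only, not from this repo):
    #   x = T.fvector('x')
    #   f = theano.function([x], T.sum(x ** 2))
    #   f(np.array([1.0, 2.0], dtype='float32'))   # -> 5.0
    # test_fn works the same way: feed (images, one-hot targets), get back the
    # mean error rate as a scalar.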

    print('Initializing the model parameters...')
    # During training the model was saved with np.savez(), which stores each param obtained from
    # get_all_param_values() under the fixed key pattern arr_n, where n is 0, 1, 2, 3, ...
    # .format(i) is Python string formatting, similar in purpose to sprintf in C/C++
    with np.load(model) as mf:
        params = [mf['arr_{:d}'.format(i)] for i in range(len(mf.files))]
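    # How the 'arr_{n}' keys arise (a save-side sketch; the actual call lives in
    # the training script, so treat this as an assumption):
    #   np.savez('model.npz', *lasagne.layers.get_all_param_values(net))
    # Positional arrays passed to np.savez are stored under keys 'arr_0',
    # 'arr_1', ..., which is exactly what the list comprehension above reads
    # back in order.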
    # Assign the trained model's parameters to self.params in each layer of the network
    set_network_params(net, params)

    # Binarize the weights. The weight scaling factors are already part of the model.
    # No need to compute them again
    # The training run stores only some of the parameters (e.g. W and the scaling factor alpha),
    # while the inference layers define further self.params tied to the input and the ops
    # (e.g. H and beta), so the params are fetched again here as a complete summary.
    # params is now a list; each layer's parameters can be reached by index.
    params = lasagne.layers.get_all_params(net)
    # The first conv layer and the last dense layer (which has 2 parameters) are not xnor.
    # Hence exclude the first param (W) and the last 2 params (W, b) from binarization.
    # Since the first and last layers are not binarized, only the xnor layers have their weights
    # set here; the model stores full-precision self.W, so W is replaced by its binarized value Wb.
    for param in params[1:-2]:
        if param.name == "W":
            param.set_value(xnornet_layers.SignNumpy(param.get_value()))
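    # SignNumpy presumably maps each weight to {-1, +1}; a minimal equivalent
    # sketch (an assumption — see xnornet_layers for the real definition):
    #   def sign_numpy(x):
    #       return np.where(x >= 0, 1.0, -1.0).astype(x.dtype)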

    if(arith_format == 'fixed'):
        print('Using FIXED point mode for testing...')
        # fixed-point number of bits, excluding the sign bit, for all parameters
        param_total_bits = 15
        convert_fxp_format(lasagne.layers.get_all_params(net), param_total_bits)
        # input data is in the range [-1, +1]; use 7 bits for the magnitude (1 bit for sign)
        test_x = fixed_point(test_x, 7, 0)
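        # fixed_point(x, total_bits, int_bits) presumably rounds to a signed
        # fixed-point grid; a minimal equivalent sketch (an assumption — the
        # real code is in fxp_helper):
        #   frac_bits = total_bits - int_bits        # here 7 - 0 = 7
        #   x_q = np.clip(np.round(x * 2**frac_bits), -2**7, 2**7 - 1) / 2**frac_bits
        # so inputs in [-1, +1] keep 7 fractional bits (resolution 1/128).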

    # start testing
    print('Testing {:d} images'.format(no_imgs))
    # record the start time
    start_time = time.time()
    # call test_fn = theano.function([input_vars, targets], test_err) to obtain the error rate
    error_batch = test_fn(test_x[0:no_imgs], test_y[0:no_imgs]) * 100
    # elapsed test time
    runtime = time.time() - start_time
    print('Testing Accuracy = {:f}%'.format(100 - error_batch))
    print('Test time = {:f} seconds'.format(runtime))
# main entry point
if __name__=='__main__':
    # parse the command-line arguments
    args = parse_args()
    # run the CIFAR-10 test
    test_cifar(args.model_file, args.no_imgs, args.mode)