Learning TensorFlow: Implementing Convolutional Neural Networks (5)

The convolutional network studied this time is GoogLeNet, whose most notable innovation is the Inception module; as research on the architecture deepened, four versions of it were proposed in total. The defining feature of Inception V1 is that it achieves very good classification performance while keeping both the computation and the parameter count under control. Reducing the parameter count matters for two reasons: first, more parameters mean a larger model, which requires more data to train; second, more parameters consume more computational resources. Besides being deeper and therefore more expressive, Inception V1 achieves good results with few parameters because it removes the final fully connected layer and replaces it with global average pooling (which reduces each feature map to 1x1). This has two benefits: 1. the model trains faster; 2. overfitting is reduced.
Generally speaking, a convolutional layer increases its expressive power mainly by adding output channels, but the side effects are higher computational cost and overfitting. The Inception module therefore relies on 1x1 convolutions, which not only organize information across channels and improve the network's expressive power, but can also raise or lower the channel dimension of the output; at very small computational cost they add an extra layer of feature transformation and nonlinearity.
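To make this concrete, here is a minimal sketch (the shapes and the scope name are illustrative, not part of the network built below) showing that a 1x1 convolution changes only the channel dimension, at a fraction of the weight count of a 3x3 convolution:

import tensorflow as tf
slim = tf.contrib.slim

x = tf.random_uniform((1, 35, 35, 256))           # a hypothetical 35x35 feature map with 256 channels
y = slim.conv2d(x, 64, [1, 1], scope='demo_1x1')  # cross-channel mixing, 256 -> 64 channels
print(y.get_shape())                              # (1, 35, 35, 64): spatial size is unchanged
# Weight count: 1*1*256*64 = 16,384, versus 3*3*256*64 = 147,456 for a 3x3 convolution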
The second version of GoogLeNet, V2, replaces each 5x5 convolution with two stacked 3x3 convolutions and each 7x7 with three stacked 3x3 convolutions. It also introduces Batch Normalization (BN), a very effective regularization method that speeds up training and substantially improves classification accuracy at convergence.
The third version, V3, introduces the idea of factorization into smaller convolutions: a larger two-dimensional convolution is split into two smaller one-dimensional convolutions (for example, 7x7 into 1x7 followed by 7x1), which saves a large number of parameters, speeds up computation, reduces overfitting, and adds an extra layer of nonlinearity that extends the model's expressive power. The fourth version, V4, combines Inception with ResNet. Below we build a network with the Inception V3 structure.
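Before building the full network, the savings from these factorizations can be checked with a little arithmetic; in the sketch below the channel count C is illustrative and biases are ignored:

C = 192
w_5x5 = 5 * 5 * C * C                 # one 5x5 convolution
w_3x3_pair = 2 * (3 * 3 * C * C)      # two stacked 3x3 convolutions (same receptive field)
w_7x7 = 7 * 7 * C * C                 # one 7x7 convolution
w_1x7_7x1 = (1 * 7 + 7 * 1) * C * C   # a 1x7 convolution followed by a 7x1 convolution
print(w_3x3_pair / w_5x5)             # 0.72: two 3x3s use 28% fewer weights than one 5x5
print(w_1x7_7x1 / w_7x7)              # ~0.29: the 1x7+7x1 pair uses ~71% fewer weights than one 7x7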

import tensorflow as tf
import time
import math
from datetime import datetime
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)

# The function below generates the default arguments for the functions used frequently in the network
def inception_v3_arg_scope(weight_decay=0.00004, stddev=0.1, batch_norm_var_collection='moving_vars'):
    batch_norm_params = {
        'decay':0.9997,
        'epsilon':0.001,
        'updates_collections':tf.GraphKeys.UPDATE_OPS,
        'variables_collections':{
            'beta':None,
            'gamma':None,
            'moving_mean':[batch_norm_var_collection],
            'moving_variance':[batch_norm_var_collection],
            }
    }
    with slim.arg_scope([slim.conv2d,slim.fully_connected],weights_regularizer = slim.l2_regularizer(weight_decay)):
        with slim.arg_scope(
            [slim.conv2d],
            weights_initializer = tf.truncated_normal_initializer(stddev=stddev),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params) as sc:
            return sc
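# Convolution layers created under this scope automatically receive L2 weight regularization,
# truncated-normal weight initialization, Batch Normalization and a ReLU activation,
# which keeps the layer definitions below compact.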
# The function below builds the convolutional part of the Inception V3 network
def inception_v3_base(inputs, scope=None):
    # end_points stores selected key endpoints (intermediate outputs) of the network
    end_points = {}
    with tf.variable_scope(scope,'InceptionV3',[inputs]):
        with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],stride=1,padding='VALID'):
            net = slim.conv2d(inputs,32,[3,3],stride=2,scope='Conv2d_1a_3x3')
            net = slim.conv2d(net,32,[3,3],scope='Conv2d_2a_3x3')
            net = slim.conv2d(net, 64, [3, 3], padding='SAME', scope='Conv2d_2b_3x3')
            net = slim.max_pool2d(net,[3,3],stride=2,scope='MaxPool_3a_3x3')
            net = slim.conv2d(net,80,[1,1],scope='Conv2d_3b_1x1')
            net = slim.conv2d(net,192,[3,3],scope='Conv2d_4a_3x3')
            net = slim.max_pool2d(net,[3,3],stride=2,scope='MaxPool_5a_3x3')
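            # With a 299x299x3 input, the stem above produces a 35x35x192 feature map:
            # 149x149x32 -> 147x147x32 -> 147x147x64 -> 73x73x64 -> 73x73x80 -> 71x71x192 -> 35x35x192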
# The first Inception block group contains three Inception modules
        with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],stride=1,padding='SAME'):
            with tf.variable_scope('Mixed_5b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,48,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,64,[3,3],scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,32,[1,1],scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
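                # Mixed_5b output: 35x35x(64+64+96+32) = 35x35x256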
            with tf.variable_scope('Mixed_5c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,48,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,64,[3,3],scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,64,[1,1],scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
            with tf.variable_scope('Mixed_5d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,48,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,64,[3,3],scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2,96,[3,3],scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,64,[1,1],scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
# The second Inception block group contains five Inception modules
            with tf.variable_scope('Mixed_6a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,384,[3,3],stride=2,padding = 'VALID',scope='Conv2d_0a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,64,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,96,[3,3],scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1,96,[3,3],stride=2,padding='VALID',scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net,[3,3],stride=2,padding = 'VALID',scope='MaxPool_0a_3x3')
                net = tf.concat([branch_0,branch_1,branch_2],3)
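                # Mixed_6a output: 17x17x(384+96+288) = 17x17x768
                # (the max-pool branch keeps the 288 channels of Mixed_5d)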
            with tf.variable_scope('Mixed_6b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,128,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,128,[1,7],scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,128,[1,1],scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,128,[7,1],scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2,128,[1,7],scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2,128,[7,1],scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2,192,[1,7],scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
            with tf.variable_scope('Mixed_6c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,192,[1,1],scope = 'Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,160,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,160,[1,7],scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,160,[1,1],scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,160,[1,7],scope='Conv2d_0b_1x7')
                    branch_2 = slim.conv2d(branch_2,160,[7,1],scope='Conv2d_0c_7x1')
                    branch_2 = slim.conv2d(branch_2,160,[1,7],scope='Conv2d_0d_1x7')
                    branch_2 = slim.conv2d(branch_2,192,[7,1],scope='Conv2d_0e_7x1')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
            with tf.variable_scope('Mixed_6d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,192,[1,1],scope = 'Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,160,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,160,[1,7],scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,160,[1,1],scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,160,[1,7],scope='Conv2d_0b_1x7')
                    branch_2 = slim.conv2d(branch_2,160,[7,1],scope='Conv2d_0c_7x1')
                    branch_2 = slim.conv2d(branch_2,160,[1,7],scope='Conv2d_0d_1x7')
                    branch_2 = slim.conv2d(branch_2,192,[7,1],scope='Conv2d_0e_7x1')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
            with tf.variable_scope('Mixed_6e'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,192,[1,1],scope = 'Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,160,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,160,[1,7],scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,160,[1,1],scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,160,[1,7],scope='Conv2d_0b_1x7')
                    branch_2 = slim.conv2d(branch_2,160,[7,1],scope='Conv2d_0c_7x1')
                    branch_2 = slim.conv2d(branch_2,160,[1,7],scope='Conv2d_0d_1x7')
                    branch_2 = slim.conv2d(branch_2,192,[7,1],scope='Conv2d_0e_7x1')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
            end_points['Mixed_6e'] = net
# The third Inception block group contains three Inception modules
            with tf.variable_scope('Mixed_7a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0,320,[3,3],stride=2,padding='VALID',scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,192,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1,192,[1,7],scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1,192,[7,1],scope='Conv2d_0c_7x1')
                    branch_1 = slim.conv2d(branch_1,192,[3,3],stride=2,padding='VALID',scope='Conv2d_0d_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net,[3,3],stride=2,padding='VALID',scope='MaxPool_0a_3x3')
                net = tf.concat([branch_0,branch_1,branch_2],3)
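                # Mixed_7a output: 8x8x(320+192+768) = 8x8x1280
                # (the max-pool branch keeps the 768 channels of Mixed_6e)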
            with tf.variable_scope('Mixed_7b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net,320,[1,1],scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net,384,[1,1],scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1,384,[1,3],scope='Conv2d_0b_1x3'),
                        slim.conv2d(branch_1,384,[3,1],scope='Conv2d_0b_3x1'),
                    ],3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net,448,[1,1],scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2,384,[3,3],scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2,384,[1,3],scope='Conv2d_0c_1x3'),
                        slim.conv2d(branch_2,384,[3,1],scope='Conv2d_0c_3x1')
                    ],3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net,[3,3],scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3,192,[1,1],scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0,branch_1,branch_2,branch_3],3)
            with tf.variable_scope('Mixed_7c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
                        slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0b_3x1'),
                    ], 3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
                        slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0c_3x1')
                    ], 3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
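                # Mixed_7c output: 8x8x(320+768+768+192) = 8x8x2048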
            return net,end_points
# Every convolution, pooling layer and Inception block group above simplifies the spatial structure
# while converting spatial information into higher-level abstract feature information,
# i.e. it trades spatial dimensions for channel dimensions.
# Below: global average pooling, Softmax and the Auxiliary Logits branch (via end_points).
# The squeeze op removes dimensions of size 1, e.g. turning a 5x3x1 tensor into 5x3.
def inception_v3(inputs,num_classes=1000,is_training=True,dropout_keep_prob=0.8,prediction_fn=slim.softmax,spatial_squeeze=True,reuse=None,scope='InceptionV3'):
    with tf.variable_scope(scope,'InceptionV3',[inputs,num_classes],reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm,slim.dropout],is_training=is_training):
            net,end_points = inception_v3_base(inputs,scope=scope)
        with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],stride=1,padding='SAME'):
            aux_logits = end_points['Mixed_6e']
            with tf.variable_scope('AuxLogits'):
                aux_logits = slim.avg_pool2d(
                    aux_logits,[5,5],stride=3,padding='VALID',scope='AvgPool_1a_5x5'
                )
                aux_logits = slim.conv2d(aux_logits,128,[1,1],scope='Conv2d_1b_1x1')

                aux_logits = slim.conv2d(
                    aux_logits,768,[5,5],weights_initializer=trunc_normal(0.01),padding='VALID',scope='Conv2d_2a_5x5'
                )
                aux_logits = slim.conv2d(
                    aux_logits,num_classes,[1,1],activation_fn=None,normalizer_fn=None,weights_initializer=trunc_normal(0.001),scope='Conv2d_2b_1x1'
                )
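                # For a 299x299 input the shapes here are: Mixed_6e 17x17x768 -> 5x5x768 (avg pool)
                # -> 5x5x128 -> 1x1x768 -> 1x1xnum_classes, so squeezing dimensions 1 and 2
                # below leaves a [batch_size, num_classes] tensor.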
                if spatial_squeeze:
                    aux_logits = tf.squeeze(aux_logits,[1,2],name='SpatialSqueeze')
                end_points['AuxLogits'] = aux_logits
# Below: the regular classification prediction logic
            with tf.variable_scope('Logits'):
                net = slim.avg_pool2d(net,[8,8],padding = 'VALID',scope='AvgPool_1a_8x8')
                net = slim.dropout(net,keep_prob=dropout_keep_prob,scope='Dropout_1b')
                end_points['PreLogits'] = net
                logits = slim.conv2d(net,num_classes,[1,1],activation_fn=None,normalizer_fn=None,scope='Conv2d_1c_1x1')
                if spatial_squeeze:
                    logits = tf.squeeze(logits,[1,2],name='SpatialSqueeze')
            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits,scope='Predictions')
        return logits,end_points
def time_tensorflow_run(session,target,info_string):
    num_steps_burn_in = 10  # warm-up iterations: the first few runs are slowed by memory loading,
                            # cache warm-up, etc., so timing only starts after 10 iterations
    total_duration = 0.0  # total elapsed time
    total_duration_squared = 0.0  # sum of squared durations, used to compute the variance
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()  # record the start time
        _ = session.run(target)  # run one iteration
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s:step %d,duration = %.3f' %
                      (datetime.now(),i-num_steps_burn_in,duration))
            total_duration += duration
            total_duration_squared += duration * duration  # accumulated to compute the mean and standard deviation
    # compute the per-batch statistics once, after all timed iterations
    mn = total_duration / num_batches  # mean time per batch
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)  # standard deviation
    print('%s:%s across %d steps,%.3f +/- %.3f sec /batch' % (datetime.now(),info_string,num_batches,mn,sd))
batch_size = 32
height,width = 299,299
inputs = tf.random_uniform((batch_size,height,width,3))
with slim.arg_scope(inception_v3_arg_scope()):
    logits,end_points = inception_v3(inputs,is_training=False)
init = tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)
num_batches=100
time_tensorflow_run(sess,logits,"Forward")

The results are as follows:

/usr/local/Cellar/anaconda/bin/python /Users/new/Documents/JLIFE/Tensorflow/training/mnist_train.py
2017-09-11 20:14:11.481924: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.
2017-09-11 20:14:11.481957: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.
2017-09-11 20:14:11.481967: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations.
2017-09-11 20:14:11.481980: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations.
2017-09-11 20:16:25.925584:step 0,duration = 11.913
2017-09-11 20:16:25.928357:Forward across 100 steps,0.119 +/- 1.185 sec /batch
2017-09-11 20:16:37.920689:Forward across 100 steps,0.239 +/- 1.673 sec /batch
2017-09-11 20:16:49.893041:Forward across 100 steps,0.359 +/- 2.040 sec /batch
2017-09-11 20:17:01.806376:Forward across 100 steps,0.478 +/- 2.341 sec /batch
2017-09-11 20:17:13.754726:Forward across 100 steps,0.597 +/- 2.604 sec /batch
2017-09-11 20:17:25.686774:Forward across 100 steps,0.717 +/- 2.837 sec /batch
2017-09-11 20:17:37.734875:Forward across 100 steps,0.837 +/- 3.052 sec /batch
2017-09-11 20:17:49.732681:Forward across 100 steps,0.957 +/- 3.246 sec /batch
2017-09-11 20:18:01.761429:Forward across 100 steps,1.077 +/- 3.426 sec /batch
2017-09-11 20:18:13.718493:Forward across 100 steps,1.197 +/- 3.591 sec /batch
2017-09-11 20:18:25.733888:step 10,duration = 12.015
2017-09-11 20:18:25.733929:Forward across 100 steps,1.317 +/- 3.747 sec /batch
2017-09-11 20:18:37.807721:Forward across 100 steps,1.438 +/- 3.894 sec /batch
2017-09-11 20:18:49.838136:Forward across 100 steps,1.558 +/- 4.031 sec /batch
2017-09-11 20:19:01.927928:Forward across 100 steps,1.679 +/- 4.162 sec /batch
2017-09-11 20:19:14.043841:Forward across 100 steps,1.800 +/- 4.286 sec /batch
2017-09-11 20:19:26.147928:Forward across 100 steps,1.921 +/- 4.402 sec /batch
2017-09-11 20:19:39.262516:Forward across 100 steps,2.052 +/- 4.536 sec /batch
2017-09-11 20:19:51.259349:Forward across 100 steps,2.172 +/- 4.638 sec /batch
2017-09-11 20:20:03.665347:Forward across 100 steps,2.296 +/- 4.743 sec /batch
2017-09-11 20:20:15.664248:Forward across 100 steps,2.416 +/- 4.834 sec /batch
2017-09-11 20:20:27.940516:step 20,duration = 12.276
2017-09-11 20:20:27.940593:Forward across 100 steps,2.539 +/- 4.926 sec /batch
2017-09-11 20:20:39.930325:Forward across 100 steps,2.659 +/- 5.008 sec /batch
2017-09-11 20:20:51.947540:Forward across 100 steps,2.779 +/- 5.087 sec /batch
2017-09-11 20:21:04.050760:Forward across 100 steps,2.900 +/- 5.163 sec /batch
2017-09-11 20:21:16.109880:Forward across 100 steps,3.021 +/- 5.234 sec /batch
2017-09-11 20:21:28.173533:Forward across 100 steps,3.142 +/- 5.301 sec /batch
2017-09-11 20:21:40.247486:Forward across 100 steps,3.262 +/- 5.365 sec /batch
2017-09-11 20:21:52.193940:Forward across 100 steps,3.382 +/- 5.424 sec /batch
2017-09-11 20:22:04.154740:Forward across 100 steps,3.501 +/- 5.480 sec /batch
2017-09-11 20:22:16.168818:Forward across 100 steps,3.622 +/- 5.533 sec /batch
2017-09-11 20:22:28.128894:step 30,duration = 11.960
2017-09-11 20:22:28.128950:Forward across 100 steps,3.741 +/- 5.583 sec /batch
2017-09-11 20:22:40.103177:Forward across 100 steps,3.861 +/- 5.629 sec /batch
2017-09-11 20:22:52.125158:Forward across 100 steps,3.981 +/- 5.674 sec /batch
2017-09-11 20:23:04.119120:Forward across 100 steps,4.101 +/- 5.715 sec /batch
2017-09-11 20:23:16.318668:Forward across 100 steps,4.223 +/- 5.756 sec /batch
2017-09-11 20:23:28.275791:Forward across 100 steps,4.343 +/- 5.791 sec /batch
2017-09-11 20:23:40.308313:Forward across 100 steps,4.463 +/- 5.825 sec /batch
2017-09-11 20:23:52.298226:Forward across 100 steps,4.583 +/- 5.855 sec /batch
2017-09-11 20:24:04.414408:Forward across 100 steps,4.704 +/- 5.884 sec /batch
2017-09-11 20:24:16.296670:Forward across 100 steps,4.823 +/- 5.908 sec /batch
2017-09-11 20:24:28.212852:step 40,duration = 11.916
2017-09-11 20:24:28.212890:Forward across 100 steps,4.942 +/- 5.930 sec /batch
2017-09-11 20:24:40.217509:Forward across 100 steps,5.062 +/- 5.950 sec /batch
2017-09-11 20:24:52.132185:Forward across 100 steps,5.181 +/- 5.967 sec /batch
2017-09-11 20:25:04.116278:Forward across 100 steps,5.301 +/- 5.982 sec /batch
2017-09-11 20:25:16.074294:Forward across 100 steps,5.421 +/- 5.994 sec /batch
2017-09-11 20:25:28.018231:Forward across 100 steps,5.540 +/- 6.004 sec /batch
2017-09-11 20:25:39.954666:Forward across 100 steps,5.659 +/- 6.011 sec /batch
2017-09-11 20:25:51.845600:Forward across 100 steps,5.778 +/- 6.016 sec /batch
2017-09-11 20:26:05.351787:Forward across 100 steps,5.913 +/- 6.036 sec /batch
2017-09-11 20:26:21.876468:Forward across 100 steps,6.079 +/- 6.098 sec /batch
2017-09-11 20:26:37.385765:step 50,duration = 15.509
2017-09-11 20:26:37.385806:Forward across 100 steps,6.234 +/- 6.138 sec /batch
2017-09-11 20:26:51.278775:Forward across 100 steps,6.373 +/- 6.153 sec /batch
2017-09-11 20:27:03.570338:Forward across 100 steps,6.496 +/- 6.147 sec /batch
2017-09-11 20:27:19.165365:Forward across 100 steps,6.651 +/- 6.178 sec /batch
2017-09-11 20:27:33.078827:Forward across 100 steps,6.791 +/- 6.183 sec /batch
2017-09-11 20:27:46.810209:Forward across 100 steps,6.928 +/- 6.183 sec /batch
2017-09-11 20:28:00.786247:Forward across 100 steps,7.068 +/- 6.183 sec /batch
2017-09-11 20:28:15.027508:Forward across 100 steps,7.210 +/- 6.183 sec /batch
2017-09-11 20:28:27.434743:Forward across 100 steps,7.334 +/- 6.161 sec /batch
2017-09-11 20:28:39.703114:Forward across 100 steps,7.457 +/- 6.136 sec /batch
2017-09-11 20:28:52.422318:step 60,duration = 12.719
2017-09-11 20:28:52.422373:Forward across 100 steps,7.584 +/- 6.112 sec /batch
2017-09-11 20:29:05.369793:Forward across 100 steps,7.714 +/- 6.087 sec /batch
2017-09-11 20:29:19.235760:Forward across 100 steps,7.852 +/- 6.068 sec /batch
2017-09-11 20:29:31.761207:Forward across 100 steps,7.977 +/- 6.033 sec /batch
2017-09-11 20:29:43.589996:Forward across 100 steps,8.096 +/- 5.992 sec /batch
2017-09-11 20:29:56.476825:Forward across 100 steps,8.225 +/- 5.955 sec /batch
2017-09-11 20:30:11.002840:Forward across 100 steps,8.370 +/- 5.929 sec /batch
2017-09-11 20:30:23.504543:Forward across 100 steps,8.495 +/- 5.883 sec /batch
2017-09-11 20:30:35.733630:Forward across 100 steps,8.617 +/- 5.832 sec /batch
2017-09-11 20:30:47.794630:Forward across 100 steps,8.738 +/- 5.777 sec /batch
2017-09-11 20:30:59.964692:step 70,duration = 12.170
2017-09-11 20:30:59.964733:Forward across 100 steps,8.859 +/- 5.720 sec /batch
2017-09-11 20:31:11.914433:Forward across 100 steps,8.979 +/- 5.658 sec /batch
2017-09-11 20:31:23.701136:Forward across 100 steps,9.097 +/- 5.592 sec /batch
2017-09-11 20:31:36.333498:Forward across 100 steps,9.223 +/- 5.527 sec /batch
2017-09-11 20:31:51.165883:Forward across 100 steps,9.371 +/- 5.477 sec /batch
2017-09-11 20:32:05.027938:Forward across 100 steps,9.510 +/- 5.413 sec /batch
2017-09-11 20:32:17.031699:Forward across 100 steps,9.630 +/- 5.333 sec /batch
2017-09-11 20:32:29.087442:Forward across 100 steps,9.751 +/- 5.250 sec /batch
2017-09-11 20:32:41.552531:Forward across 100 steps,9.875 +/- 5.164 sec /batch
2017-09-11 20:32:55.587893:Forward across 100 steps,10.016 +/- 5.084 sec /batch
2017-09-11 20:33:07.884767:step 80,duration = 12.297
2017-09-11 20:33:07.884808:Forward across 100 steps,10.139 +/- 4.988 sec /batch
2017-09-11 20:33:19.759846:Forward across 100 steps,10.257 +/- 4.885 sec /batch
2017-09-11 20:33:31.866941:Forward across 100 steps,10.378 +/- 4.778 sec /batch
2017-09-11 20:33:44.002791:Forward across 100 steps,10.500 +/- 4.666 sec /batch
2017-09-11 20:33:55.807739:Forward across 100 steps,10.618 +/- 4.547 sec /batch
2017-09-11 20:34:08.899814:Forward across 100 steps,10.749 +/- 4.426 sec /batch
2017-09-11 20:34:22.858711:Forward across 100 steps,10.888 +/- 4.303 sec /batch
2017-09-11 20:34:34.455836:Forward across 100 steps,11.004 +/- 4.162 sec /batch
2017-09-11 20:34:46.054080:Forward across 100 steps,11.120 +/- 4.013 sec /batch
2017-09-11 20:34:58.234396:Forward across 100 steps,11.242 +/- 3.855 sec /batch
2017-09-11 20:35:10.279311:step 90,duration = 12.045
2017-09-11 20:35:10.279361:Forward across 100 steps,11.363 +/- 3.687 sec /batch
2017-09-11 20:35:21.949073:Forward across 100 steps,11.479 +/- 3.505 sec /batch
2017-09-11 20:35:33.753712:Forward across 100 steps,11.597 +/- 3.310 sec /batch
2017-09-11 20:35:45.290767:Forward across 100 steps,11.713 +/- 3.098 sec /batch
2017-09-11 20:35:56.912075:Forward across 100 steps,11.829 +/- 2.866 sec /batch
2017-09-11 20:36:09.432450:Forward across 100 steps,11.954 +/- 2.608 sec /batch
2017-09-11 20:36:24.681580:Forward across 100 steps,12.107 +/- 2.336 sec /batch
2017-09-11 20:36:39.788277:Forward across 100 steps,12.258 +/- 2.015 sec /batch
2017-09-11 20:36:53.871603:Forward across 100 steps,12.399 +/- 1.604 sec /batch
2017-09-11 20:37:06.146323:Forward across 100 steps,12.521 +/- 1.010 sec /batch

Process finished with exit code 0
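Since the whole point of the Inception design is parameter efficiency, a quick sanity check is to count the trainable parameters of the graph built above. This is a minimal sketch (it assumes the graph from the script above has already been constructed and that numpy is available):

import numpy as np
total_params = 0
for var in tf.trainable_variables():
    total_params += np.prod(var.get_shape().as_list())  # elements in this variable
print('Trainable parameters: %d' % total_params)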