Comparing CPU and GPU performance of AlexNet in TensorFlow

This post compares the efficiency of running the AlexNet model with TensorFlow on a CPU versus a GPU; the GPU turned out to be roughly 50 times faster. Even so, the author notes in this 2018 write-up that they still did not own a GPU of their own.

Code and comments

from datetime import datetime
import math
import time 
import tensorflow as tf
batch_size = 32
num_batches = 100

# Helper that shows the structure of each layer: prints the op name and the tensor shape
def print_activations(t):
    print(t.op.name, ' ',t.get_shape().as_list())

# with tf.name_scope('conv1') as scope  # automatically prefixes variables created inside the scope as conv1/xxx, which makes the components easy to tell apart
def inference(images):
    parameters = []
    # First convolutional layer
    with tf.name_scope('conv1') as scope:
        # Convolution kernel, weights initialized from a truncated normal distribution
        kernel = tf.Variable(tf.truncated_normal([11,11,3,64],
                                                dtype=tf.float32,stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(images,kernel,[1,4,4,1],padding='SAME')
        # Trainable biases
        biases = tf.Variable(tf.constant(0.0,shape=[64],dtype=tf.float32),trainable=True,name='biases')
        bias = tf.nn.bias_add(conv,biases)
        conv1 = tf.nn.relu(bias,name=scope)
        print_activations(conv1)
        parameters += [kernel,biases]
        # Add LRN and max pooling. Outside of AlexNet, LRN has largely been abandoned: it reportedly adds little benefit and slows computation down.
        lrn1 = tf.nn.lrn(conv1,4,bias=1.0,alpha=0.001/9,beta=0.75,name='lrn1')
        pool1 = tf.nn.max_pool(lrn1,ksize=[1,3,3,1],strides = [1,2,2,1],padding='VALID',name='pool1')
        print_activations(pool1)
    # Second convolutional layer; only some of the parameters differ
    with tf.name_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5,5,64,192],dtype=tf.float32,stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(pool1,kernel,[1,1,1,1],padding='SAME')
        biases = tf.Variable(tf.constant(0.0,shape=[192],dtype=tf.float32),trainable=True,name='biases')
        bias = tf.nn.bias_add(conv,biases)
        conv2 = tf.nn.relu(bias,name=scope)
        parameters += [kernel,biases]
        print_activations(conv2)
        # Apply LRN and max pooling again
        lrn2 = tf.nn.lrn(conv2,4,bias=1.0,alpha = 0.001/9,beta=0.75,name='lrn2')
        pool2 = tf.nn.max_pool(lrn2,ksize=[1,3,3,1],strides=[1,2,2,1],padding='VALID',name='pool2')
        print_activations(pool2)
    # Third convolutional layer
    with tf.name_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal([3,3,192,384],dtype=tf.float32,stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(pool2,kernel,[1,1,1,1],padding='SAME')
        biases = tf.Variable(tf.constant(0.0,shape=[384],dtype=tf.float32),trainable=True,name='biases')
        bias = tf.nn.bias_add(conv,biases)
        conv3 = tf.nn.relu(bias,name=scope)
        parameters += [kernel,biases]
        print_activations(conv3)
    # Fourth convolutional layer
    with tf.name_scope('conv4') as scope:
        kernel = tf.Variable(tf.truncated_normal([3,3,384,256],dtype=tf.float32,stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(conv3,kernel,[1,1,1,1],padding='SAME')
        biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32),trainable=True,name = 'biases')
        bias = tf.nn.bias_add(conv,biases)
        conv4 = tf.nn.relu(bias,name=scope)
        parameters += [kernel,biases]
        print_activations(conv4)
    # Fifth convolutional layer
    with tf.name_scope('conv5') as scope:
        kernel = tf.Variable(tf.truncated_normal([3,3,256,256],dtype=tf.float32,stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(conv4,kernel,[1,1,1,1],padding= 'SAME')
        biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32),trainable=True,name='biases')
        bias = tf.nn.bias_add(conv,biases)
        conv5 = tf.nn.relu(bias,name=scope)
        parameters += [kernel,biases]
        print_activations(conv5)

        # Followed by a max-pooling layer
        pool5 = tf.nn.max_pool(conv5,ksize=[1,3,3,1],strides = [1,2,2,1],padding='VALID',name = 'pool5')
        print_activations(pool5)
        return pool5,parameters

# The fully connected layers are omitted here; the benchmark only times the network up to pool5
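# For reference only (not part of the original post): a minimal sketch of what
# AlexNet's fully connected layers could look like if inference() were extended
# past pool5. The helper name fc_layers is hypothetical; the layer sizes
# (4096/4096/1000, with pool5 assumed to be [batch_size, 6, 6, 256]) follow the
# standard AlexNet paper rather than this benchmark.
def fc_layers(pool5):
    flat = tf.reshape(pool5, [batch_size, 6 * 6 * 256])
    with tf.name_scope('fc6') as scope:
        weights = tf.Variable(tf.truncated_normal([6 * 6 * 256, 4096], dtype=tf.float32, stddev=1e-1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), trainable=True, name='biases')
        fc6 = tf.nn.relu(tf.matmul(flat, weights) + biases, name=scope)
    with tf.name_scope('fc7') as scope:
        weights = tf.Variable(tf.truncated_normal([4096, 4096], dtype=tf.float32, stddev=1e-1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), trainable=True, name='biases')
        fc7 = tf.nn.relu(tf.matmul(fc6, weights) + biases, name=scope)
    with tf.name_scope('fc8') as scope:
        weights = tf.Variable(tf.truncated_normal([4096, 1000], dtype=tf.float32, stddev=1e-1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[1000], dtype=tf.float32), trainable=True, name='biases')
        fc8 = tf.nn.xw_plus_b(fc7, weights, biases, name=scope)  # logits, no ReLU on the output layer
    return fc8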
# Measure the time per round of computation. The first argument is the TF Session,
# the second is the op to run, and the third is a label for the test.
# The first few rounds are affected by memory loading, cache warm-up, etc., so only iterations after the 10th are counted.
def time_tensorflow_run(session,target,info_string):
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    # Run num_batches + num_steps_burn_in iterations in total,
    # timing each one with time.time(); statistics start only after the warm-up
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s:step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    # After all iterations, report the mean time per batch and its standard deviation (vr = E[x^2] - (E[x])^2)
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, num_batches, mn, sd))
def run_benchmark():
    # First define the default Graph
    with tf.Graph().as_default():
        # We do not train on real ImageNet data; we only time the computation on random inputs
        image_size = 224
        images = tf.Variable(tf.random_normal([batch_size,image_size,image_size,3],dtype=tf.float32,stddev=1e-1))
        pool5,parameters = inference(images)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        # Feed pool5 directly into the timing run (no fully connected layers);
        # this is only for benchmarking, not real training
        time_tensorflow_run(sess,pool5,"Forward")
        # A dummy L2 objective so there is something to backpropagate through
        objective = tf.nn.l2_loss(pool5)
        grad = tf.gradients(objective,parameters)
        time_tensorflow_run(sess,grad,"Forward-backward")

run_benchmark()
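The post does not show how the graph was pinned to the CPU or the GPU for the comparison. Below is a minimal sketch (not from the original code) of how one might force device placement with tf.device; the wrapper name run_on_device is hypothetical, while '/cpu:0' and '/gpu:0' are TensorFlow's standard device strings.

def run_on_device(device_name):
    # Build the same graph, but pin every op to the given device, e.g. '/cpu:0' or '/gpu:0'
    with tf.Graph().as_default():
        with tf.device(device_name):
            images = tf.Variable(tf.random_normal([batch_size, 224, 224, 3], dtype=tf.float32, stddev=1e-1))
            pool5, parameters = inference(images)
            objective = tf.nn.l2_loss(pool5)
            grad = tf.gradients(objective, parameters)
        init = tf.global_variables_initializer()
        # allow_soft_placement lets TensorFlow fall back to the CPU if the requested GPU is unavailable
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            sess.run(init)
            time_tensorflow_run(sess, pool5, "Forward on " + device_name)
            time_tensorflow_run(sess, grad, "Forward-backward on " + device_name)

# run_on_device('/cpu:0')
# run_on_device('/gpu:0')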
CPU run results:

conv1  [32, 56, 56, 64]
conv1/pool1  [32, 27, 27, 64]
conv2  [32, 27, 27, 192]
conv2/pool2  [32, 13, 13, 192]
conv3  [32, 13, 13, 384]
conv4  [32, 13, 13, 256]
conv5  [32, 13, 13, 256]
conv5/pool5  [32, 6, 6, 256]
2018-04-07 22:04:02.078231: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:03.381361: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:04.645201: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:05.934559: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:07.261441: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:08.600831: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:09.864171: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:11.132014: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:12.440383: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:13.715730: Forward across 100 steps, 0.000 +/- 0.000 sec / batch
2018-04-07 22:04:15.047116:step 0, duration = 1.330
2018-04-07 22:04:15.047616: Forward across 100 steps, 0.013 +/- 0.132 sec / batch
2018-04-07 22:04:16.427534: Forward across 100 steps, 0.027 +/- 0.190 sec / batch
2018-04-07 22:04:17.825463: Forward across 100 steps, 0.041 +/- 0.234 sec / batch
2018-04-07 22:04:19.149843: Forward across 100 steps, 0.054 +/- 0.266 sec / batch
2018-04-07 22:04:20.465219: Forward across 100 steps, 0.067 +/- 0.294 sec / batch
2018-04-07 22:04:21.931704: Forward across 100 steps, 0.082 +/- 0.325 sec / batch
2018-04-07 22:04:23.300113: Forward across 100 steps, 0.096 +/- 0.349 sec / batch
2018-04-07 22:04:24.705568: Forward across 100 steps, 0.110 +/- 0.373 sec / batch
2018-04-07 22:04:26.021455: Forward across 100 steps, 0.123 +/- 0.391 sec / batch
2018-04-07 22:04:27.387037: Forward across 100 steps, 0.137 +/- 0.410 sec / batch
2018-04-07 22:04:28.670890:step 10, duration = 1.283
2018-04-07 22:04:28.671891: Forward across 100 steps, 0.149 +/- 0.425 sec / batch
2018-04-07 22:04:29.951741: Forward across 100 steps, 0.162 +/- 0.440 sec / batch
2018-04-07 22:04:31.253607: Forward across 100 steps, 0.175 +/- 0.454 sec / batch
2018- ... (remaining log truncated)