Handling GPU Out-of-Memory Errors When Training Neural Networks with TensorFlow

My previous blog post covered fine-tuning the VGG16 model (link: see that post). It ran fine on the CPU, but when I copied everything over to the GPU (a GTX 750 Ti) it ran out of memory. Below is how I worked around the problem.

I have to admit the OOM is partly down to my weak GPU. To keep GPU memory usage as low as possible, I do not load the VGG16 model through the usual variable-assignment route. Instead, the network's weights and biases are treated as constant values and plugged straight into the graph, which avoids the extra memory consumed when sess.run(tf.global_variables_initializer()) has to initialize every variable.
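To make the idea concrete, here is a minimal sketch (assuming the same vgg16_weights.npz file and sorted key order as the code below) contrasting a trainable-variable layer with the constant-weight trick used in this post:

import numpy as np
import tensorflow as tf

weights = np.load('vgg/vgg16_weights.npz')   # adjust to your local path
keys = sorted(weights.keys())                # keys[0]/keys[1] are the conv1_1 W/b

images = tf.placeholder(tf.float32, [None, 224, 224, 3])

# Variant A: a trainable variable initialized from the pretrained array.
# Every such variable has to be handled by
# sess.run(tf.global_variables_initializer()), which costs extra GPU memory.
kernel_var = tf.Variable(weights[keys[0]], name='conv1_1_W')
conv_a = tf.nn.conv2d(images, kernel_var, [1, 1, 1, 1], padding='SAME')

# Variant B (what this post does): pass the numpy array straight into the op.
# TensorFlow bakes it into the graph as a constant, so there is nothing to
# initialize and the layer stays frozen.
conv_b = tf.nn.conv2d(images, weights[keys[0]], [1, 1, 1, 1], padding='SAME')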

The link above also covers where to download the VGG16 weight file; see that post for details. The snippet below shows how to inspect the contents of the file, so you can look at the values of each component:

import numpy as np

weight_file = 'vgg/vgg16_weights.npz'  # change this to wherever you saved the file
weights = np.load(weight_file)
keys = sorted(weights.keys())
print(weights[keys[29]])  # inspect the values of the 30th entry
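If you want to see how the sorted indices map onto layer names, a short loop over the keys prints each entry's name together with its array shape:

import numpy as np

weights = np.load('vgg/vgg16_weights.npz')   # same file as above
for i, k in enumerate(sorted(weights.keys())):
    # prints index, key name, and array shape for each entry
    # (conv kernels, conv biases, and the fully connected layers)
    print(i, k, np.shape(weights[k]))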

Another factor often mentioned online is an over-large batch_size. For this run I set batch_size to 16, and training went through without running out of memory.
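Besides lowering the batch size, TensorFlow 1.x also lets a session allocate GPU memory on demand instead of reserving the whole card up front. The code in this post does not use this option, but a minimal sketch looks like this:

import tensorflow as tf

# Ask TensorFlow to grow GPU memory usage on demand rather than grabbing
# all of it at session creation; optionally cap the usable fraction.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.8  # optional hard cap

sess = tf.Session(config=config)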

The full code for training VGG16 on the GPU is below:

import tensorflow as tf  
import numpy as np  
import os 

N_CLASSES = 2  # two output neurons: [1,0] or [0,1], the cat/dog probabilities
IMG_W = 224  # resize images to this width; larger images make training slower
IMG_H = 224
BATCH_SIZE = 16  # number of images per batch
CAPACITY = 256  # queue capacity: the maximum number of images held in the queue
MAX_STEP = 15000  # number of training steps, should be >= 10000
learning_rate = 0.0001  # learning rate; keep learning_rate <= 0.0001 at the start

file_dir = 'catdog/train/'   


weight_file = 'vgg/vgg16_weights.npz' 
weights = np.load(weight_file)  
keys = sorted(weights.keys())  
#print(weights[keys[0]])
  
def network(imgs, batch_size, n_classes):   

    # zero-mean input  
    with tf.name_scope('preprocess') as scope:  
        mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')  
        images = imgs-mean  

    # conv1_1  
    with tf.name_scope('conv1_1') as scope:  
        kernel = weights[keys[0]]
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[1]]
        out = tf.nn.bias_add(conv, biases)  
        conv1_1 = tf.nn.relu(out, name=scope)    

    # conv1_2  
    with tf.name_scope('conv1_2') as scope:  
        kernel = weights[keys[2]] 
        conv = tf.nn.conv2d(conv1_1, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[3]]  
        out = tf.nn.bias_add(conv, biases)  
        conv1_2 = tf.nn.relu(out, name=scope)    

    # pool1  
    pool1 = tf.nn.max_pool(conv1_2,  
                            ksize=[1, 2, 2, 1],  
                            strides=[1, 2, 2, 1],  
                            padding='SAME',  
                            name='pool1')  

    # conv2_1  
    with tf.name_scope('conv2_1') as scope:  
        kernel = weights[keys[4]]
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[5]] 
        out = tf.nn.bias_add(conv, biases)  
        conv2_1 = tf.nn.relu(out, name=scope)    

    # conv2_2  
    with tf.name_scope('conv2_2') as scope:  
        kernel = weights[keys[6]] 
        conv = tf.nn.conv2d(conv2_1, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[7]] 
        out = tf.nn.bias_add(conv, biases)  
        conv2_2 = tf.nn.relu(out, name=scope)  
        
    # pool2  
    pool2 = tf.nn.max_pool(conv2_2,  
                            ksize=[1, 2, 2, 1],  
                            strides=[1, 2, 2, 1],  
                            padding='SAME',  
                            name='pool2')  

    # conv3_1  
    with tf.name_scope('conv3_1') as scope:  
        kernel = weights[keys[8]]
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[9]]
        out = tf.nn.bias_add(conv, biases)  
        conv3_1 = tf.nn.relu(out, name=scope)  

    # conv3_2  
    with tf.name_scope('conv3_2') as scope:  
        kernel = weights[keys[10]]
        conv = tf.nn.conv2d(conv3_1, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[11]] 
        out = tf.nn.bias_add(conv, biases)  
        conv3_2 = tf.nn.relu(out, name=scope)  
        

    # conv3_3  
    with tf.name_scope('conv3_3') as scope:  
        kernel = weights[keys[12]] 
        conv = tf.nn.conv2d(conv3_2, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[13]] 
        out = tf.nn.bias_add(conv, biases)  
        conv3_3 = tf.nn.relu(out, name=scope)  
        

    # pool3  
    pool3 = tf.nn.max_pool(conv3_3,  
                            ksize=[1, 2, 2, 1],  
                            strides=[1, 2, 2, 1],  
                            padding='SAME',  
                            name='pool3')  

    # conv4_1  
    with tf.name_scope('conv4_1') as scope:  
        kernel = weights[keys[14]]
        conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[15]] 
        out = tf.nn.bias_add(conv, biases)  
        conv4_1 = tf.nn.relu(out, name=scope)   

    # conv4_2  
    with tf.name_scope('conv4_2') as scope:  
        kernel = weights[keys[16]] 
        conv = tf.nn.conv2d(conv4_1, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[17]]
        out = tf.nn.bias_add(conv, biases)  
        conv4_2 = tf.nn.relu(out, name=scope)  
        

    # conv4_3  
    with tf.name_scope('conv4_3') as scope:  
        kernel = weights[keys[18]]
        conv = tf.nn.conv2d(conv4_2, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[19]] 
        out = tf.nn.bias_add(conv, biases)  
        conv4_3 = tf.nn.relu(out, name=scope)  
        

    # pool4  
    pool4 = tf.nn.max_pool(conv4_3,  
                            ksize=[1, 2, 2, 1],  
                            strides=[1, 2, 2, 1],  
                            padding='SAME',  
                            name='pool4')  

    # conv5_1  
    with tf.name_scope('conv5_1') as scope:  
        kernel = weights[keys[20]]
        conv = tf.nn.conv2d(pool4, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[21]]  
        out = tf.nn.bias_add(conv, biases)  
        conv5_1 = tf.nn.relu(out, name=scope)  
         

    # conv5_2  
    with tf.name_scope('conv5_2') as scope:  
        kernel = weights[keys[22]]  
        conv = tf.nn.conv2d(conv5_1, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[23]] 
        out = tf.nn.bias_add(conv, biases)  
        conv5_2 = tf.nn.relu(out, name=scope)  
         

    # conv5_3  
    with tf.name_scope('conv5_3') as scope:  
        kernel = weights[keys[24]]
        conv = tf.nn.conv2d(conv5_2, kernel, [1, 1, 1, 1], padding='SAME')  
        biases = weights[keys[25]]  
        out = tf.nn.bias_add(conv, biases)  
        conv5_3 = tf.nn.relu(out, name=scope)  
        

    # pool5  
    pool5 = tf.nn.max_pool(conv5_3,  
                            ksize=[1, 2, 2, 1],  
                            strides=[1, 2, 2, 1],  
                            padding='SAME',  
                            name='pool5')  

 
    # fc1  
    with tf.name_scope('fc1') as scope:  
        reshape = tf.reshape(pool5, shape=[batch_size, -1])  
        fc1w = weights[keys[26]]
        fc1b = weights[keys[27]]  
        #pool5_flat = tf.reshape(pool5, [-1, shape])  
        fc1l = tf.nn.bias_add(tf.matmul(reshape, fc1w), fc1b)  
        fc1 = tf.nn.relu(fc1l)  
         

    # fc2  
    with tf.name_scope('fc2') as scope:  
        fc2w = weights[keys[28]]
        fc2b = weights[keys[29]]
        fc2l = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)  
        fc2 = tf.nn.relu(fc2l)  
         

    # fc3  
    with tf.name_scope('fc3') as scope:  
        fc3w = tf.Variable(tf.truncated_normal([4096, n_classes],  
                                                        dtype=tf.float32,  
                                                        stddev=1e-1), trainable=True,name='weights')  
        fc3b = tf.Variable(tf.constant(1.0, shape=[n_classes], dtype=tf.float32),  
                                trainable=True, name='biases')  
        fc3l = tf.nn.bias_add(tf.matmul(fc2, fc3w), fc3b)
        # return the raw logits: losses() below uses
        # sparse_softmax_cross_entropy_with_logits, which applies softmax internally
    return  fc3l   

# def load_weights(self, weight_file, sess):  
#     weights = np.load(weight_file)  
#     keys = sorted(weights.keys())  
#     for i, k in enumerate(keys):  
#         if i not in [30,31]:  
#             print (i, k, np.shape(weights[k]))
#             sess.run(self.parameters[i].assign(weights[k]))  
#     print("-------------all done-------------")


def get_files(file_dir):
    # file_dir: path to the image folder
    # returns: shuffled lists of image file paths and labels

    cats = []    
    label_cats = []    
    dogs = []    
    label_dogs = []    
    # collect the file paths and assign label values
    for file in os.listdir(file_dir):
        name = file.split('.')
        if name[0] == 'cat':
            cats.append(file_dir + file)  # append the path to the cat list
            label_cats.append(0)
        else:
            dogs.append(file_dir + file)
            label_dogs.append(1)
    print("There are %d cats\nThere are %d dogs" % (len(cats), len(dogs)))    

    # shuffle the file order
    image_list = np.hstack((cats, dogs))  # stack cats first, then dogs, into one array
    label_list = np.hstack((label_cats, label_dogs))
    temp = np.array([image_list, label_list])  # pair paths with labels
    temp = temp.transpose()  # transpose so each row is (path, label)
    np.random.shuffle(temp)

    image_list = list(temp[:, 0])  # first column: image paths
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]    

    return image_list, label_list    

        
def get_batch(image, label, image_W, image_H, batch_size, capacity):
    # convert the inputs into tensors TensorFlow can consume
    image = tf.cast(image, tf.string)  # image paths as strings
    label = tf.cast(label, tf.int32)

    # put the image paths and labels into an input queue
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    # read the raw image file
    image_contents = tf.read_file(input_queue[0])
    # decode the JPEG; channels=3 gives an RGB color image (1 would be grayscale)
    image = tf.image.decode_jpeg(image_contents, channels=3)
    # crop or pad around the image center to the target image_W x image_H
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    # standardize each image: subtract its mean and divide by its standard deviation
    image = tf.image.per_image_standardization(image)

    # build batches; num_threads depends on your machine, capacity is the maximum
    # number of images held in the queue (use tf.train.shuffle_batch to shuffle)
    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, num_threads=64, capacity=capacity)

    # reshape label_batch to [batch_size]
    label_batch = tf.reshape(label_batch, [batch_size])
    # cast the images to float32
    image_batch = tf.cast(image_batch, tf.float32)
    return image_batch, label_batch

def onehot(labels):
    '''one-hot encode integer labels (not used below; kept for reference)'''
    n_sample = len(labels)
    n_class = max(labels) + 1
    onehot_labels = np.zeros((n_sample, n_class))
    onehot_labels[np.arange(n_sample), labels] = 1
    return onehot_labels


 
 
def losses(logits, labels):  
    with tf.variable_scope('loss') as scope:  
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits \
                        (logits=logits, labels=labels, name='xentropy_per_example')  
        loss = tf.reduce_mean(cross_entropy, name='loss')   
    return loss  

def trainning(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)  # adaptive optimizer
        global_step = tf.Variable(0, name='global_step', trainable=False)  # scalar tracking the global training step
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op

def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        # in_top_k checks whether the true label is among each sample's top-k
        # predictions; k=1 compares the arg-max prediction with the label
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
    return accuracy





def run_training():  
  

    # get the image paths and labels
    train, train_label = get_files(file_dir)
    # build the input batches
    train_batch, train_label_batch = get_batch(train,
                                               train_label,
                                               IMG_W,
                                               IMG_H,
                                               BATCH_SIZE,
                                               CAPACITY)
    # build the model
    train_logits = network(train_batch, BATCH_SIZE, N_CLASSES)
    # loss
    train_loss = losses(train_logits, train_label_batch)
    # training op
    train_op = trainning(train_loss, learning_rate)
    # accuracy
    train__acc = evaluation(train_logits, train_label_batch)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())  
    # TensorFlow provides two classes to support multi-threaded input pipelines:
    # tf.train.Coordinator and tf.train.QueueRunner. They are designed to be used
    # together: the Coordinator stops all worker threads at once and reports
    # exceptions to the program waiting for them to finish, while the QueueRunner
    # coordinates the worker threads pushing tensors into the same queue.
    coord = tf.train.Coordinator()  
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  

    try:  
        for step in np.arange(MAX_STEP):  
            if coord.should_stop():  
                    break  
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])  

            if step % 50== 0:  
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %(step, tra_loss, tra_acc*100.0))    


    except tf.errors.OutOfRangeError:  
        print('Done training -- epoch limit reached')  
    finally:  
        coord.request_stop()
    coord.join(threads)  
    sess.close()  

# train
run_training()

         

