《TensorFlow实战Google深度学习框架(第2版)》 Chapter 6 Exercises

Preface:

   The last part of Chapter 6 is a transfer learning example. I tried rewriting it from scratch myself, following the book's intent.

 

Main text:

   The code is as follows:

    testC6.py:

# -*- coding: utf-8 -*-
'''
This file implements the main flow of the transfer learning example.
'''
import os.path
import tensorflow as tf
import JPG_to_npy
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.inception_v3 as inception_v3
import numpy as np
import gc

#image_path = "flower_photos"
#JPG_to_npy.JPG_to_npy(image_path)  # convert the JPG files to npy files

ckpt_file = "D:/Backup/Documents/Visual Studio 2015/inception_v3.ckpt"
output_file = "D:/Backup/Documents/Visual Studio 2015/Projects/testC6/testC6/trained_model"  #①
trainable_scopes = ["InceptionV3/Logits", "InceptionV3/AuxLogits"]

learning_rate = 0.0001
steps = 300
batch = 32
number_of_classes = 5
training_file_pointer = 0

non_trainable_variables = []
images = tf.placeholder(tf.float32, [None, 299, 299, 3])
labels = tf.placeholder(tf.int64, [None])


# The two functions below load the data in segments, for training and for evaluation.
# No path arguments are exposed here; the default "data" directory is assumed.
def segmented_batch_training(sess, training_step, total_loss, batch, training_file_pointer):
    #gc.collect()
    training_images = []
    training_labels = []
    for k in range(batch):
        try:
            processed_data = np.load("data/training_data_"+str(training_file_pointer)+".npy")
        except IOError:
            training_file_pointer = 0   # ran past the last file, wrap around to the first one
            processed_data = np.load("data/training_data_"+str(training_file_pointer)+".npy")
        training_images.append(processed_data[0])
        training_labels.append(processed_data[1])
        training_file_pointer += 1
    _, loss = sess.run([training_step, total_loss],
                       feed_dict={images: training_images, labels: training_labels})
    print(loss)
    return training_file_pointer   # hand the pointer back so the caller can resume where this batch ended

# The evaluation function below loads 50 files at a time by default, accumulates the
# per-segment accuracy, and finally returns the average over all files.
def segmented_evaluation(sess, evaluation_step, is_validation=True, segment_size=50):
    #gc.collect()
    file_pointer = 0
    accumulated_accuracy = 0

    if is_validation:
        front_file_name = "data/validation_data_"
    else:
        front_file_name = "data/test_data_"

    while True:
        evaluation_images = []
        evaluation_labels = []
        for i in range(segment_size):
            try:
                processed_data = np.load(front_file_name+str(file_pointer)+".npy")
            except IOError:
                # No more files: evaluate the partial segment collected so far, then
                # return the accuracy averaged over all file_pointer files.
                if evaluation_images:
                    accuracy = sess.run(evaluation_step,
                                        feed_dict={images: evaluation_images, labels: evaluation_labels})
                    accumulated_accuracy += accuracy * len(evaluation_images)
                return accumulated_accuracy / max(file_pointer, 1)
            evaluation_images.append(processed_data[0])
            evaluation_labels.append(processed_data[1])
            file_pointer += 1
        accuracy = sess.run(evaluation_step,
                            feed_dict={images: evaluation_images, labels: evaluation_labels})
        accumulated_accuracy += accuracy * segment_size


def training_start():  # main driver

    # The call below builds the Inception-v3 graph and all of its variables. I did not
    # wrap it in an arg_scope here, but a larger project would probably need one.
    logits, _ = inception_v3.inception_v3(images, number_of_classes)
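
    # For reference, a minimal sketch of the arg_scope variant (hedged: this assumes
    # the inception_v3_arg_scope helper that ships alongside the slim model, which
    # sets the default weight decay and batch-norm parameters):
    #
    # with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    #     logits, _ = inception_v3.inception_v3(images, number_of_classes)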

    for var in tf.trainable_variables():  # split the variables into trainable and frozen ones
        if not (var.op.name.startswith(trainable_scopes[0]) or var.op.name.startswith(trainable_scopes[1])):
            non_trainable_variables.append(var)
        else:
            tf.add_to_collection("trainable_variables_for_now", var)

    tf.GraphKeys.TRAINABLE_VARIABLES = "trainable_variables_for_now"  # repoint the trainable-variables collection so the optimizer only sees the new layers

    load_fn = slim.assign_from_checkpoint_fn(ckpt_file, non_trainable_variables, True)  # restore only the frozen variables; True means ignore_missing_vars

    tf.losses.softmax_cross_entropy(tf.one_hot(labels, number_of_classes), logits)  # add the cross-entropy loss; the original model already contributes its regularization terms
    total_loss = tf.losses.get_total_loss()  # build the total-loss op once instead of once per training step
    training_step = tf.train.RMSPropOptimizer(learning_rate).minimize(total_loss)  # the optimization step
    #print(tf.get_collection("losses"))  #②
    correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
    evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    load_fn(sess)

    file_pointer = training_file_pointer
    for i in range(steps):
        file_pointer = segmented_batch_training(sess, training_step, total_loss, batch, file_pointer)
        if (i+1) % 30 == 0:
            #saver.save(sess, output_file, global_step=i)  # save the checkpoint (graph structure and weights)
            print("%d-%dth iteration is passed with validation accuracy of %f"
                  % (i-28, i+1, segmented_evaluation(sess, evaluation_step, True)))

    print("Final test accuracy is %f" % (segmented_evaluation(sess, evaluation_step, False)))

training_start()
'''
1. Why can't a relative path be used at ①?
2. At ② only the cross-entropy loss is printed, with no regularization terms. Why?
3. Why does the book reach an accuracy of 0.9 while this code only gets about 0.3?
'''
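
On question 2: in TF 1.x the cross-entropy and the weight-decay terms live in separate collections, so printing the "losses" collection only ever shows the cross-entropy. A minimal sketch for inspecting both, using only the standard tf.GraphKeys / tf.losses API (note that Inception-v3's regularizers are normally attached by inception_v3_arg_scope, so a model built without an arg_scope may leave the second collection empty):

print(tf.get_collection(tf.GraphKeys.LOSSES))                  # the cross-entropy added by tf.losses.softmax_cross_entropy
print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))   # the weight-decay terms, if any were created
total = tf.losses.get_total_loss()   # sums both collections by default (add_regularization_losses=True)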

    JPG_to_npy.py:

# -*- coding: utf-8 -*-
'''
This file converts JPGs to npy files, producing one npy target file per JPG source
file. The file name "test_data_28.npy" denotes the 29th file of the test data (the
first file is numbered 0).
'''
import glob
import os.path
import tensorflow as tf
import numpy as np
import gc
from tensorflow.python.platform import gfile

# The function below converts each JPG file into an npy file
def JPG_to_npy(input_JPG_path, output_file_path = "data", validation_data_ratio = 0.1, 
               test_data_ratio = 0.1):
    file_list = []
    file_labels = []

    # collect all the files and their labels
    sub_dirs = [x[0] for x in os.walk(input_JPG_path)]  # every sub-directory under input_JPG_path
    extensions = ["jpg", "jpeg", "JPG", "JPEG"]
    current_label = 0
    for sub_dir in sub_dirs:
        if sub_dir == input_JPG_path: continue
        for extension in extensions:
            file_glob = glob.glob(sub_dir+"/*."+extension)
            file_list.extend(file_glob)   # add the file paths to file_list
            file_labels.extend([current_label] * len(file_glob))   # one integer label per file path
        current_label += 1

    # shuffle the files and labels in unison (both shuffles reuse the same RNG state)
    state = np.random.get_state()
    np.random.shuffle(file_list)
    np.random.set_state(state)
    np.random.shuffle(file_labels)

    training_count = 0
    test_count = 0
    validation_count = 0
    iteration_times = 0
    sess = tf.Session()   # used to evaluate the image-processing ops below

    # Build the decode/resize ops once, outside the loop; creating new ops for every
    # file would keep growing the graph and leak memory.
    jpeg_data = tf.placeholder(tf.string)
    image = tf.image.resize_images(tf.image.decode_jpeg(jpeg_data, channels=3), [299, 299])   # ① why do the formats differ here?
    for file_name in file_list:
        print("label=" + str(file_labels[iteration_times]) + "  file_path=" + file_name)   # print the file and label currently being stored
        image_value = sess.run(image, feed_dict={jpeg_data: gfile.FastGFile(file_name, "rb").read()})

        chance = np.random.random_sample()
        if chance < validation_data_ratio:   # ② what is the point of this random split?
            np.save(output_file_path+"/validation_data_"+str(validation_count)+".npy",
                    np.asarray([image_value, file_labels[iteration_times]]))
            validation_count += 1
        elif chance < (validation_data_ratio + test_data_ratio):
            np.save(output_file_path+"/test_data_"+str(test_count)+".npy",
                    np.asarray([image_value, file_labels[iteration_times]]))
            test_count += 1
        else:
            np.save(output_file_path+"/training_data_"+str(training_count)+".npy",
                    np.asarray([image_value, file_labels[iteration_times]]))
            training_count += 1

        iteration_times += 1
        gc.collect()
'''
1. The program in the book keeps all the image data in memory at once. My laptop only
has 4 GB of RAM, so that approach clearly does not work for me. I therefore rewrote it
so that every JPG source file produces one corresponding npy target file, and memory
is reclaimed after each file is written.
'''
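
As a quick sanity check on the conversion, one of the generated files can be loaded back and inspected. A minimal sketch (the path is an example, and newer numpy versions require allow_pickle=True to load object arrays like these):

import numpy as np

sample = np.load("data/training_data_0.npy", allow_pickle=True)
image, label = sample[0], sample[1]   # [299x299x3 float image, integer class label]
print(image.shape, image.dtype, label)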

Closing remarks:

{

    I wrote this a while ago and simply forgot to post it; I am publishing it now (to make up the numbers).

}

 
