The most basic way to learn TensorFlow code: type more, understand more

I have recently been working through a book on practical deep learning with TensorFlow. The code in the book looks simple and the theory is fairly basic, yet when I tried to write some code myself, nothing came out. I reflected on what had gone wrong: in my rush, I had sped through the TensorFlow tutorials and the deep learning books in one pass, picking up only a rough grasp of the theory and principles while never understanding the concrete code details. This is a classic beginner's mistake: reading without hands-on practice.
The programmer's way to learn code is to type more and understand more. Whenever you learn something new (an algorithm, a program), you must type it out yourself; only once you have typed it out and understood it does it become yours. Otherwise, even if you copy code over and get the basic functionality working, you still won't understand the details.
Take the basic cat-vs-dog classification code below: if you don't write it out yourself, your grasp of the details will stay at the surface and never become concrete.

# -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 19:27:26 2021
cat_VS_dog  cat/dog classification
@author: chenfeng
"""

import tensorflow as tf	
from tensorflow import keras
import numpy as np
import os
import random

num_epochs = 100					# number of passes over the dataset
num_class = 2						# number of classes
num_train = 23000					# total number of training samples
num_test = 2000						# total number of test samples
input_rows,input_cols = 100,100		# input image size
batch_size = 32						# batch size
learning_rate = 0.01				# initial learning rate
loss_value = []						# record of loss values

num_parallel_calls = 4
#================================================================#
#= NOTE!!! num_parallel_calls is the number of CPU cores used   =#
#= by the data-augmentation map; adjust it to your machine      =#
#================================================================#

DATA_DIR = 'E:/UserData/Desktop/catVSdog_dataSet/'		# data root directory
LABLE = {0:'cats',1:'dogs'}				# class labels
#'F:/data/cats-and-dogs/train/cats'		# path to the cat training samples; the others are similar


#============================  model  ============================#
#=================================================================#

def CNN():
    model = keras.Sequential()

    #block1
    
    model.add(keras.layers.Conv2D(32,[3,3],padding = 'same',kernel_initializer='he_normal',activation = tf.nn.elu,input_shape = (input_rows,input_cols,3)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Conv2D(32,[3,3],padding = 'same',kernel_initializer='he_normal',activation = tf.nn.elu))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPool2D(pool_size = [2,2]))
    model.add(keras.layers.Dropout(0.2))
    
    #block2
    
    model.add(keras.layers.Conv2D(64,[3,3],padding = 'same',kernel_initializer='he_normal',activation = tf.nn.elu))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Conv2D(64,[3,3],padding = 'same',kernel_initializer='he_normal',activation = tf.nn.elu))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPool2D(pool_size = [2,2]))
    model.add(keras.layers.Dropout(0.2))
    
    #block3
    
    model.add(keras.layers.Conv2D(128,[3,3],padding = 'same',kernel_initializer='he_normal',activation = tf.nn.elu))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Conv2D(128,[3,3],padding = 'same',kernel_initializer='he_normal',activation = tf.nn.elu))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPool2D())
    model.add(keras.layers.Dropout(0.2))
    
    #block4
    
    model.add(keras.layers.Conv2D(256,[3,3],padding = 'same',kernel_initializer='he_normal',activation = tf.nn.elu))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Conv2D(256,[3,3],padding = 'same',kernel_initializer='he_normal',activation = tf.nn.elu))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPool2D())
    model.add(keras.layers.Dropout(0.5))
    
    #block5
    
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(64,activation = tf.nn.elu,kernel_initializer='he_normal'))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(0.5))
    
    #block6
    
    model.add(keras.layers.Dense(32,activation = tf.nn.elu,kernel_initializer='he_normal'))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(0.5))

    #block7
    
    model.add(keras.layers.Dense(num_class,activation = tf.nn.softmax,kernel_initializer='he_normal'))
    model.summary()					# summary() prints the architecture itself; wrapping it in print() would also print 'None'
    return model
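# Aside (my own sketch, not part of the original code): blocks 1-4 repeat the
# same Conv2D + BatchNormalization pattern, so it could be factored into a
# helper to cut duplication; the name conv_bn_block is hypothetical.
# def conv_bn_block(model, filters, dropout_rate):
#     for _ in range(2):
#         model.add(keras.layers.Conv2D(filters, [3, 3], padding='same',
#                                       kernel_initializer='he_normal',
#                                       activation=tf.nn.elu))
#         model.add(keras.layers.BatchNormalization())
#     model.add(keras.layers.MaxPool2D(pool_size=[2, 2]))
#     model.add(keras.layers.Dropout(dropout_rate))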

#=============================  data  ============================#
#=================================================================#

def data_set(DATA_DIR,option):			# load data from the train or test folder depending on option
    if option == 'train':
        k = num_train
    else:
        k = num_test
    Filenames = []
    Lable = np.zeros(shape = (k,1))
    i = 0
    for lable in LABLE:					# key 0 walks the cats folder, key 1 the dogs folder
        for filename in os.listdir(DATA_DIR + option + '/' +  LABLE[lable]):
            Filenames.append(DATA_DIR + option + '/' + LABLE[lable] + '/' + filename)
            Lable[i] = (lable)
            i += 1
    return Filenames,Lable 				# return the list of image paths and the matching np array of labels
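# Quick sanity check (my own snippet, not part of the original script) -- run it
# once to confirm the loader sees every file:
# paths, labels = data_set(DATA_DIR, 'train')
# print(len(paths), labels.shape)		# expect 23000 and (23000, 1)
# print(paths[0], labels[0])			# a cat image path and label 0.0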

def map_fun(filename,lable):			# data augmentation: rotation, mirroring, flipping
    image_string = tf.io.read_file(filename)
    image = tf.image.decode_jpeg(image_string,channels = 3)
    									# the two steps above are required to turn an image path into image data
    image = tf.image.resize(image,[input_rows,input_cols])
    									# resize to the model's input size; without this, images of
    									# different sizes cannot be batched together
    image = tf.cond(tf.random.uniform([]) > 0.5,
                    lambda: tf.image.rot90(image),
                    lambda: image)
    									# note: Python's random.random() would be evaluated only once when
    									# this function is traced, giving every image the same decision;
    									# tf.random.uniform draws a fresh value per image
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_flip_up_down(image)
    
    image = image / 255					# scale values into [0,1], which is said to help computation and convergence
    
    return image,lable 
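# Smoke test for map_fun (my own snippet, not part of the original script):
# build a one-element pipeline and confirm the output shape and dtype.
# paths, labels = data_set(DATA_DIR, 'train')
# ds = tf.data.Dataset.from_tensor_slices((paths[:1], labels[:1])).map(map_fun)
# for img, lab in ds:
#     print(img.shape, img.dtype, lab.numpy())	# expect (100, 100, 3) float32 [0.]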

#========================  funs and main  ========================#
#=================================================================#


class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self,logs = {}):
        self.losses = []
        
    def on_batch_end(self,batch,logs = {}):
        self.losses.append(logs.get('loss'))
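# After model.fit(...) returns, the per-batch losses collected by this callback
# can be plotted. A minimal sketch (matplotlib is my own addition, not part of
# the original script):
# import matplotlib.pyplot as plt
# plt.plot(history.losses)				# 'history' is the LossHistory instance created in main
# plt.xlabel('batch'); plt.ylabel('loss')
# plt.show()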



if __name__ == '__main__':
    TRAN_Filenames,TRAN_Lable = data_set(DATA_DIR,'train')
    tran_dataset = tf.data.Dataset.from_tensor_slices((TRAN_Filenames,TRAN_Lable))
    									# careful here!!! the arguments must be wrapped in a single tuple,
    									# (TRAN_Filenames,TRAN_Lable); pass them separately and you get an
    									# error that is very hard to track down
    tran_dataset = tran_dataset.map(map_func = map_fun,num_parallel_calls=num_parallel_calls)
    tran_dataset = tran_dataset.shuffle(23000)
    tran_dataset = tran_dataset.batch(batch_size)
    tran_dataset = tran_dataset.prefetch(tf.data.experimental.AUTOTUNE)

    TEST_Filenames,TEST_Lable = data_set(DATA_DIR,'test')
    test_dataset = tf.data.Dataset.from_tensor_slices((TEST_Filenames,TEST_Lable))
    test_dataset = test_dataset.map(map_func = map_fun,num_parallel_calls=num_parallel_calls)
    									# note that map_fun also augments the test images; a separate
    									# non-augmenting map function would be cleaner
    test_dataset = test_dataset.batch(batch_size)
#================ data handling above ================#
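    # Optional sanity check (my own addition, not in the original): inspect the
    # batched element spec before building the model
    # print(tran_dataset.element_spec)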
	
    model = CNN()	# build the model
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        					loss=tf.keras.losses.sparse_categorical_crossentropy,
        					metrics=[tf.keras.metrics.sparse_categorical_accuracy])
    				# compile() defines the optimizer, loss and metrics; see reference 1
    				
    				# the next four objects are the callbacks used during training
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss',
                                                  factor = 0.2,
                                                  patience = 4, 
                                                  verbose = 1,
                                                  min_delta = 0.00001)
                    # dynamically adjusts the learning rate when val_loss plateaus
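                    # worked example (my own addition): with factor = 0.2 the
                    # learning rate steps 0.01 -> 0.002 -> 0.0004, each time
                    # val_loss fails to improve for 4 consecutive epochs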
    earlystop = keras.callbacks.EarlyStopping(monitor = 'val_loss',
                                              min_delta = 0,
                                              patience = 10,
                                              verbose = 1,
                                              restore_best_weights = True)
                    # stops training early once the model is no longer improving
    history = LossHistory()
    				# LossHistory records the per-batch training loss values in history
    checkpoint = keras.callbacks.ModelCheckpoint('best_model.h5',
                                                 monitor = 'val_loss',
                                                 mode = 'min',
                                                 save_best_only = True,
                                                 verbose = 1)
                   	# checks whether the current model is the best so far and keeps only the best one
    callbacks = [history,reduce_lr,earlystop,checkpoint]
    				# pack the callbacks into a list
    model.fit(tran_dataset,epochs = num_epochs, callbacks = callbacks,validation_data = test_dataset)
    				# train the model with the callbacks set; the batch size is already fixed on the
    				# dataset, so fit() must not be given a batch_size argument (doing so raises an
    				# error). validation_data = test_dataset evaluates the test set after each epoch;
    				# the 'val_loss' monitored by reduce_lr and the others is that test-set loss,
    				# used to save the best model and to adjust the learning rate
    #tf.saved_model.save(model, "F:/data/cats-and-dogs")
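
Once training finishes, the checkpoint written by ModelCheckpoint can be loaded back for inference. A minimal sketch, assuming 'best_model.h5' sits in the working directory and 'test.jpg' is a placeholder for an image you want to classify (both names are placeholders, not from the original post):

import tensorflow as tf
from tensorflow import keras

model = keras.models.load_model('best_model.h5')	# weights saved by ModelCheckpoint
image = tf.io.read_file('test.jpg')					# placeholder path
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [100, 100]) / 255	# same preprocessing as training
probs = model.predict(tf.expand_dims(image, 0))[0]	# add a batch dimension
print({0: 'cats', 1: 'dogs'}[int(probs.argmax())], probs)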


