[tf-keras] CIFAR-10 dataset -- reaching 89.8% image-classification accuracy -- a detailed walkthrough of the whole pipeline

The most important steps in building a deep-learning pipeline are:

(1) the model  (2) the loss function  (3) the optimizer

This post walks through the whole process along these steps, using the CIFAR-10 dataset as the running example and Keras as the framework.
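In Keras these three choices come together in a single compile call. As a minimal sketch, with a deliberately tiny placeholder network rather than the models built later in this post:

from keras.models import Sequential
from keras.layers import Flatten, Dense

toy_model = Sequential([Flatten(input_shape=(32, 32, 3)),  # (1) the model
                        Dense(10, activation='softmax')])
toy_model.compile(loss='categorical_crossentropy',         # (2) the loss function
                  optimizer='adam',                        # (3) the optimizer
                  metrics=['accuracy'])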

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

print(tf.__version__)

Loading the CIFAR-10 dataset

from keras.datasets import cifar10
from keras.utils import to_categorical

(train_images_cifar, train_labels_cifar), (test_images_cifar, test_labels_cifar) = cifar10.load_data()
# the images already arrive with shape (N, 32, 32, 3); pixel scaling happens
# exactly once, in the Normalization section below, so the raw uint8 arrays are kept here

# the labels arrive as (N, 1) integer arrays; convert them to one-hot vectors
train_labels_cifar = to_categorical(train_labels_cifar)
test_labels_cifar = to_categorical(test_labels_cifar)
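As a quick sanity check of what to_categorical produces:

# the integer label 3 becomes a one-hot vector of length 10
print(to_categorical([3], num_classes=10))
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]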

Train/validation split

train_images_cifar.shape
X_train = train_images_cifar[0:45000]
X_val = train_images_cifar[45000:]
y_train = train_labels_cifar[0:45000]
y_val = train_labels_cifar[45000:]
X_test = test_images_cifar
y_test = test_labels_cifar
print("X_train.shape:",X_train.shape)
print("X_val.shape:",X_val.shape)
print("X_test.shape:",X_test.shape)

Exploring the data

import random
# display six randomly chosen training images
for i in range(6):
    plt.subplot(1, 6, i + 1)
    sample_im = X_train[random.randint(0, len(X_train) - 1)]
    plt.imshow(sample_im, interpolation='none')
    plt.tight_layout()
plt.show()
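It also helps to see each sample with its class name. CIFAR-10's ten classes, in label order, are airplane, automobile, bird, cat, deer, dog, frog, horse, ship, and truck; a variant of the loop above that titles each image accordingly:

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

for i in range(6):
    plt.subplot(1, 6, i + 1)
    idx = random.randint(0, len(X_train) - 1)
    plt.imshow(X_train[idx], interpolation='none')
    plt.title(class_names[np.argmax(y_train[idx])], fontsize=8)
    plt.axis('off')
plt.tight_layout()
plt.show()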

Normalization

# cast to float32 and scale pixel values from [0, 255] to [0, 1]
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
X_val = X_val.astype('float32') / 255

Training hyperparameters

batch_size = 128
num_classes = 10
epochs = 100

Building the model (Model 1: DenseNet121) -- 85.24%

MobileNet, MobileNetV2, and ResNet50 were also tried; among them DenseNet121 performed best, reaching 85.24%.

import keras
from keras.layers import Input
from keras.applications.densenet import DenseNet121

input_tensor = Input(shape = (32, 32, 3))
model = DenseNet121(input_tensor=input_tensor, weights=None, include_top=True, classes=10)

model.summary()
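The other backbones mentioned above can be instantiated the same way. For example, a sketch with ResNet50 (note that some older versions of keras-applications enforce a larger minimum input size for ResNet50, so a recent version may be required for 32x32 inputs):

from keras.applications.resnet50 import ResNet50

# same setup: trained from scratch (no pretrained weights), 10 output classes
model_resnet = ResNet50(input_tensor=input_tensor, weights=None,
                        include_top=True, classes=10)
model_resnet.summary()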

Building the model (Model 2: a custom ResNet-18-style network) -- 89.83%

Judging from the experiments above, models with fewer than roughly 40M parameters performed better here, so a not-too-complex architecture based on ResNet-18 was designed.

import keras
from keras.models import Model
from keras.layers import Input, Dense, Dropout, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, \
    Activation, ZeroPadding2D
from keras.layers import add, Flatten

def Conv2d_BN(x, output_filter, kernel_size, strides=(1, 1), padding='same', name=None):
    """Convolution + batch-normalization block. Note that ReLU is applied
    inside the convolution, before BN; this is the ordering used throughout
    this model."""
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None

    x = Conv2D(output_filter, kernel_size, padding=padding, strides=strides, activation='relu', name=conv_name)(x)
    x = BatchNormalization(axis=3, name=bn_name)(x)
    return x
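The results below were obtained with the ordering above. The canonical ResNet ordering is conv -> BN -> ReLU; a variant to compare against, if desired:

def Conv2d_BN_v2(x, output_filter, kernel_size, strides=(1, 1), padding='same'):
    # conventional ordering: convolution -> batch norm -> ReLU
    x = Conv2D(output_filter, kernel_size, padding=padding, strides=strides)(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    return x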

def Block(inpt, output_filter, kernel_size, strides=(1, 1), padding='same', with_conv_shortcut=False):
    """Basic residual block: two Conv2d_BN layers plus a shortcut connection.
    When the block changes the spatial size or channel count, the shortcut is
    itself a convolution (with_conv_shortcut=True); otherwise it is the identity."""
    x = Conv2d_BN(inpt, output_filter=output_filter, kernel_size=kernel_size, strides=strides, padding=padding)
    x = Conv2d_BN(x, output_filter=output_filter, kernel_size=kernel_size, padding=padding)
    if with_conv_shortcut:
        shortcut = Conv2d_BN(inpt, output_filter=output_filter, kernel_size=kernel_size, strides=strides, padding=padding)
        x = add([x, shortcut])
        return x
    else:
        x = add([x, inpt])
        return x

input_tensor = Input(shape = (32, 32, 3))

# conv1
x = Conv2d_BN(input_tensor, 64, (3, 3), (2, 2), padding='same')
# x = MaxPooling2D((3, 3), (2, 2), padding='same')(x)

# conv2
x = Block(x, output_filter=64, kernel_size=(3, 3))
x = Block(x, output_filter=64, kernel_size=(3, 3))

# conv3
x = Block(x, output_filter=128, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
x = Block(x, output_filter=128, kernel_size=(3, 3))

# conv4
x = Block(x, output_filter=256, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
x = Block(x, output_filter=256, kernel_size=(3, 3))

# conv5
x = Block(x, output_filter=512, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
x = Block(x, output_filter=512, kernel_size=(3, 3))

x = AveragePooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(10, activation='softmax')(x)
    
model = Model(inputs=input_tensor, outputs=x)
model.summary()
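A quick check against the ~40M-parameter budget mentioned above:

# sanity check: total parameter count in millions
print("total parameters: %.2fM" % (model.count_params() / 1e6))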

Compiling the model

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(lr=0.001),
              metrics=['accuracy'])

Using TensorBoard

# more advanced Keras usage: training callbacks
from keras.callbacks import TensorBoard
from keras.callbacks import ReduceLROnPlateau

reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=10, mode='auto')

tbCallBack = TensorBoard(log_dir='./logs/',  # directory for the log files
                 histogram_freq=0,  # how often (in epochs) to compute histograms; 0 disables them
#                  batch_size=32,   # how much data to use when computing histograms
                 write_graph=True,  # whether to store the network graph
                 write_grads=True,  # whether to visualize gradient histograms
                 write_images=True, # whether to visualize model weights as images
                 embeddings_freq=0,
                 embeddings_layer_names=None,
                 embeddings_metadata=None)
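tbCallBack only takes effect once it is passed to the callbacks list of the training call, as done in the next section. While training runs, the logs can be viewed by launching tensorboard --logdir=./logs/ from a shell and opening http://localhost:6006 in a browser.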

Data augmentation + training the model

from keras.preprocessing.image import ImageDataGenerator
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
    featurewise_center=False,  # set input mean to 0 over the dataset
    samplewise_center=False,  # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
    width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=True,  # randomly flip images
    vertical_flip=False)  # randomly flip images

# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
# With all the feature-wise options above disabled, this is effectively a
# no-op, but it is kept for when those options are switched on.
datagen.fit(X_train)

# Fit the model on the batches generated by datagen.flow().
history = model.fit_generator(datagen.flow(X_train, y_train,
                                           batch_size=batch_size),
                              steps_per_epoch=X_train.shape[0] // batch_size,
                              epochs=epochs,
                              validation_data=(X_val, y_val),
                              callbacks=[reduce_lr, tbCallBack])
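After a 100-epoch run it is worth persisting the trained model; a minimal sketch (the filename is just an illustrative choice):

# save architecture + weights + optimizer state to a single HDF5 file
model.save('cifar10_resnet18.h5')

# it can later be restored with:
# from keras.models import load_model
# model = load_model('cifar10_resnet18.h5')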

Evaluating the model

score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
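Beyond the aggregate score, individual predictions can be spot-checked; a small sketch that reuses the class_names list from the data-exploration section:

# compare predicted and true classes for the first five test images
probs = model.predict(X_test[:5])
pred_labels = np.argmax(probs, axis=1)
true_labels = np.argmax(y_test[:5], axis=1)
for p, t in zip(pred_labels, true_labels):
    print("predicted: %-10s  true: %s" % (class_names[p], class_names[t]))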

Accuracy and loss curves:

import matplotlib.pyplot as plt
%matplotlib inline
# note: newer Keras versions use 'accuracy' / 'val_accuracy' as the history keys
accuracy = history.history['acc']
val_accuracy = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(accuracy))  # avoid shadowing the `epochs` hyperparameter above
plt.plot(epochs_range, accuracy, 'bo', label='Training accuracy')
plt.plot(epochs_range, val_accuracy, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs_range, loss, 'bo', label='Training loss')
plt.plot(epochs_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()