Deep Learning with TF - 8. Classic CNN Models: LeNet-5, VGG13, AlexNet, GoogLeNet, ResNet, DenseNet

I. LeNet-5

    In 1998, LeCun published the LeNet-5 architecture, and the term "weight sharing" is commonly traced back to the LeNet family of models. Although the 2012 AlexNet network is now widely regarded as the starting point of deep learning, the origins of CNNs go back to LeNet-5.
[Figure: LeNet-5 network architecture]

Input size: 32×32
Convolutional layers: 2
Pooling layers: 2
Fully connected layers: 2
Output: 10 classes (probabilities for the digits 0-9)
C1 is a convolutional layer with six 5×5 kernels (extracting 6 kinds of local features) and strides=1; its output is 28×28×6.
S2 is a pooling layer with a 2×2 window and strides=2; it shrinks the data volume while retaining the useful information.
C3 is a convolutional layer with sixteen 5×5 kernels (extracting 16 kinds of local features) and strides=1; its output is 10×10×16.
S4 is a pooling layer with a 2×2 window and strides=2, again shrinking the data while keeping the useful information.
C5 is a fully connected layer with 120 neurons.
F6 is a fully connected layer with 84 neurons.
(Each shape follows from the convolution arithmetic out = (in - kernel) / stride + 1 with 'valid' padding; see the sketch after this list.)
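
These shapes can be checked mechanically. Below is a minimal Keras sketch of the classic architecture, assuming a single-channel 32×32 input, tanh activations, and average pooling (the 1998 paper actually used scaled-tanh units and trainable subsampling layers, so this is an approximation, not LeCun's exact model):

import tensorflow as tf
from tensorflow.keras import layers, Sequential

lenet5 = Sequential([
    layers.Conv2D(6, 5, activation='tanh', input_shape=(32, 32, 1)),  # C1: 28x28x6
    layers.AveragePooling2D(2, strides=2),                            # S2: 14x14x6
    layers.Conv2D(16, 5, activation='tanh'),                          # C3: 10x10x16
    layers.AveragePooling2D(2, strides=2),                            # S4: 5x5x16
    layers.Flatten(),                                                 # 400 features
    layers.Dense(120, activation='tanh'),                             # C5
    layers.Dense(84, activation='tanh'),                              # F6
    layers.Dense(10, activation='softmax'),                           # 10-way output
])
lenet5.summary()  # prints the per-layer output shapes listed above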

1. Hands-on: classifying CIFAR-10 with LeNet-5

Version 1: feeding the data as NumPy-style arrays directly to fit

import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# GPU: allow memory growth instead of pre-allocating all GPU memory
from tensorflow.compat.v1 import ConfigProto, InteractiveSession

config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

import random
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, metrics, optimizers, losses, Sequential, datasets
import matplotlib.pyplot as plt

# Random seeds for reproducibility
def seed_everything(SEED):
    os.environ['TF_DETERMINISTIC_OPS'] = '1'
    os.environ['PYTHONHASHSEED'] = str(SEED)
    random.seed(SEED)
    np.random.seed(SEED)
    tf.random.set_seed(SEED)

seed_everything(42)

# Load the dataset
(x, y), (x_test, y_test) = datasets.cifar10.load_data()
print('shape =', x.shape, y.shape, x_test.shape, y_test.shape)
# shape = (50000, 32, 32, 3) (50000, 1) (10000, 32, 32, 3) (10000, 1)
x = tf.cast(x, dtype=tf.float32)
y = tf.cast(y, dtype=tf.int32)
x_test = tf.cast(x_test, dtype=tf.float32)
y_test = tf.cast(y_test, dtype=tf.int32)
# Split into training and validation sets
x_train, x_val = tf.split(x, num_or_size_splits=[40000, 10000])
y_train, y_val = tf.split(y, num_or_size_splits=[40000, 10000])
x_train, x_val, x_test = x_train / 255.0, x_val / 255.0, x_test / 255.0
# labels have shape (N, 1); one_hot yields (N, 1, 10), so reshape to (N, 10)
y_train = tf.one_hot(y_train, depth=10)
y_train = tf.reshape(y_train, (-1, 10))
y_val = tf.one_hot(y_val, depth=10)
y_val = tf.reshape(y_val, (-1, 10))
y_test = tf.one_hot(y_test, depth=10)
y_test = tf.reshape(y_test, (-1, 10))
print(y_test.shape)

# Build the network
def LeNet_5():
    inputs = tf.keras.Input(shape=(32, 32, 3))
    network = Sequential([
        # two conv + pool stages; stride-1 'same' pooling keeps the 32x32
        # spatial size (classic LeNet-5 uses stride-2 pooling instead)
        layers.Conv2D(6, kernel_size=(5, 5), padding='same', activation=tf.nn.relu),
        layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=1),

        layers.Conv2D(16, kernel_size=(5, 5), padding='same', activation=tf.nn.relu),
        layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=1),
    ])
    conv = network(inputs)
    # spatial dims are unchanged, so the flattened feature size is 32*32*16
    out = tf.reshape(conv, (-1, 32 * 32 * 16))
    # two fully connected layers, then the softmax classifier
    out = layers.Dense(120, activation=tf.nn.relu)(out)
    out = layers.Dense(84, activation=tf.nn.relu)(out)
    output = layers.Dense(10, activation=tf.nn.softmax)(out)
    model = tf.keras.Model(inputs=inputs, outputs=output)
    return model


network = LeNet_5()
# Compile the model; the output layer already applies softmax,
# so the loss must use from_logits=False
network.compile(optimizer=optimizers.Adam(learning_rate=0.001),
                loss=losses.CategoricalCrossentropy(from_logits=False),
                metrics=['accuracy'])
# Train with early stopping on validation accuracy
early_stopping = keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)
history = network.fit(x=x_train, y=y_train, batch_size=128, epochs=30,
                      validation_data=(x_val, y_val),
                      callbacks=[early_stopping])

# Plot the training curves
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
epochs_range = np.arange(len(acc))
fig = plt.figure(figsize=(15, 5))
fig.add_subplot()
plt.plot(epochs_range, acc, label='Train acc')
plt.plot(epochs_range, val_acc, label='Val_acc')
plt.legend(loc='upper right')
plt.title('Train and Val acc')
plt.show()
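
# The History object also records losses; plotting them as well makes
# overfitting easier to spot (an optional addition using the same API)
plt.figure(figsize=(15, 5))
plt.plot(epochs_range, history.history['loss'], label='Train loss')
plt.plot(epochs_range, history.history['val_loss'], label='Val loss')
plt.legend(loc='upper right')
plt.title('Train and Val loss')
plt.show()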

# Predict on the test set and compute accuracy by hand
y_pred = network.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_true = np.argmax(y_test, axis=1)
correct = tf.reduce_sum(tf.cast(tf.equal(y_pred, y_true), dtype=tf.int32))
print(int(correct) / len(y_test))
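
As a cross-check, the same number can be read off Keras' built-in evaluation, since the model was compiled with an accuracy metric (evaluate returns the loss followed by the compiled metrics):

test_loss, test_acc = network.evaluate(x_test, y_test, verbose=0)
print('test accuracy:', test_acc)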

[Figure: training and validation accuracy curves]
Version 2: feeding the data as a tf.data pipeline (tensor format)

#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: admin
@file: 1.py
@time: 2021/02/25
@desc:
"""
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# GPU: allow memory growth instead of pre-allocating all GPU memory
from tensorflow.compat.v1 import ConfigProto, InteractiveSession

config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

import random
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, metrics, optimizers, losses, Sequential, datasets
import matplotlib.pyplot as plt

# Random seeds for reproducibility
def seed_everything(SEED):
    os.environ['TF_DETERMINISTIC_OPS'] = '1'
    os.environ['PYTHONHASHSEED'] = str(SEED)
    random.seed(SEED)
    np.random.seed(SEED)
    tf.random.set_seed(SEED)

seed_everything(42)


def preprocess(x, y):
    x = 2 * tf.cast(x, dtype=tf.float32) / 255.0 - 1  # scale to (-1, 1)
    y = tf.cast(y, dtype=tf.int32)
    y = tf.one_hot(y, depth=10)
    # remove the size-1 dimension: (1, 10) -> (10,)
    y = tf.squeeze(y)
    return x, y


# Load the dataset
(x, y), (x_test, y_test) = datasets.cifar10.load_data()
print('shape =', x.shape, y.shape, x_test.shape, y_test.shape)
# shape = (50000, 32, 32, 3) (50000, 1) (10000, 32, 32, 3) (10000, 1)

# Split into training and validation sets
x_train, x_val = tf.split(x, num_or_size_splits=[40000, 10000])
y_train, y_val = tf.split(y, num_or_size_splits=[40000, 10000])
# Build tf.data pipelines
db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.map(preprocess).shuffle(40000).batch(128)

db_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
db_val = db_val.map(preprocess).batch(128)  # no need to shuffle validation data

db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.map(preprocess).batch(128)
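
# Quick sanity check (an optional addition): pull one batch to confirm the
# pipeline emits the expected shapes
for x_b, y_b in db_train.take(1):
    print('batch shapes:', x_b.shape, y_b.shape)  # (128, 32, 32, 3) (128, 10)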


# Build the network
def LeNet_5():
    inputs = tf.keras.Input(shape=(32, 32, 3))
    network = Sequential([
        # two conv + pool stages; stride-1 'same' pooling keeps the 32x32
        # spatial size (classic LeNet-5 uses stride-2 pooling instead)
        layers.Conv2D(6, kernel_size=(5, 5), padding='same', activation=tf.nn.relu),
        layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=1),

        layers.Conv2D(16, kernel_size=(5, 5), padding='same', activation=tf.nn.relu),
        layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=1),
    ])
    conv = network(inputs)
    # spatial dims are unchanged, so the flattened feature size is 32*32*16
    out = tf.reshape(conv, (-1, 32 * 32 * 16))
    # two fully connected layers, then the softmax classifier
    out = layers.Dense(120, activation=tf.nn.relu)(out)
    out = layers.Dense(84, activation=tf.nn.relu)(out)
    output = layers.Dense(10, activation=tf.nn.softmax)(out)
    model = tf.keras.Model(inputs=inputs, outputs=output)
    return model


network = LeNet_5()
# Compile the model; the output layer already applies softmax,
# so the loss must use from_logits=False
network.compile(optimizer=optimizers.Adam(learning_rate=0.001),
                loss=losses.CategoricalCrossentropy(from_logits=False),
                metrics=['accuracy'])
# Train with early stopping on validation accuracy
early_stopping = keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5)
history = network.fit(db_train, epochs=50,
                      validation_data=db_val, validation_freq=1,
                      callbacks=[early_stopping], verbose=2)

# Plot the training curves
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
epochs_range = np.arange(len(acc))
fig = plt.figure(figsize=(10, 8))
fig.add_subplot()
plt.plot(epochs_range, acc, label='Train acc')
plt.plot(epochs_range, val_acc, label='Val_acc')
plt.legend(loc='upper right')
plt.title('Train and Val acc')
plt.show()

# Predict on the test set and compute accuracy by hand
y_pred = network.predict(db_test)
y_pred = np.argmax(y_pred, axis=1)
y_true = np.squeeze(y_test).astype(np.int64)  # match y_pred's integer dtype
correct = tf.reduce_sum(tf.cast(tf.equal(y_pred, y_true), dtype=tf.int32))
print(int(correct) / len(y_test))
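
For a per-class view of the errors, the predictions computed above can go straight into a confusion matrix (a small sketch using tf.math.confusion_matrix, reusing the y_true and y_pred arrays from the accuracy check):

# rows are true CIFAR-10 classes, columns are predicted classes
cm = tf.math.confusion_matrix(y_true, y_pred, num_classes=10)
print(cm.numpy())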

[Figure: training and validation accuracy curves (tf.data version)]

shape = (50000, 32, 32, 3) (50000, 1) (10000, 32, 32, 3) (10000, 1)
Epoch 1/100
313/313 - 8s - loss: 2.1210 - accuracy: 0.3333 - val_loss: 0.0000e+00 - val_accuracy: 0.0000e+00
Epoch 2/100
313/313 - 6s - loss: 2.0361 - accuracy: 0.4209 - val_loss: 2.0027 - val_accuracy: 0.4578
Epoch 3/100
313/313 - 6s - loss: 1.9920 - accuracy: 0.4664 - val_loss: 1.9975 - val_accuracy: 0.4600
Epoch 4/100
313/313 - 6s - loss: 1.9588 - accuracy: 0.5001 - val_loss: 1.9713 - val_accuracy: 0.4873
Epoch 5/100
313/313 - 6s - loss: 1.9379 - accuracy: 0.5201 - val_loss: 1.9473 - val_accuracy: 0.5096
Epoch 6/100
313/313 - 6s - loss: 1.9242 - accuracy: 0.5352 - val_loss: 1.9736 - val_accuracy: 0.4797
Epoch 7/100
313/313 - 6s - loss: 1.9075 - accuracy: 0.5513 - val_loss: 1.9341 - val_accuracy: 0.5234
Epoch 8/100
313/313 - 6s - loss: 1.8852 - accuracy: 0.5735 - val_loss: 1.9385 - val_accuracy: 0.5187
Epoch 9/100
313/313 - 6s - loss: 1.8695 - accuracy: 0.5908 - val_loss: 1.9218 - val_accuracy: 0.5368
Epoch 10/100
313/313 - 6s - loss: 1.8553 - accuracy: 0.6045 - val_loss: 1.8971 - val_accuracy: 0.5582
Epoch 11/100
313/313 - 6s - loss: 1.8128 - accuracy: 0.6484 - val_loss: 1.8627 - val_accuracy: 0.5949
Epoch 12/100
313/313 - 6s - loss: 1.7856 - accuracy: 0.6752 - val_loss: 1.8788 - val_accuracy: 0.5801
Epoch 13/100
313/313 - 6s - loss: 1.7691 - accuracy: 0.6921 - val_loss: 1.8632 - val_accuracy: 0.5946
Epoch 14/100
313/313 - 6s - loss: 1.7591 - accuracy: 0.7017 - val_loss: 1.8508 - val_accuracy: 0.6060
Epoch 15/100
313/313 - 6s - loss: 1.7500 - accuracy: 0.7115 - val_loss: 1.8601 - val_accuracy: 0.5976
Epoch 16/100
313/313 - 6s - loss: 1.7379 - accuracy: 0.7234 - val_loss: 1.8519 - val_accuracy: 0.6064
Epoch 17/100
313/313 - 6s - loss: 1.7302 - accuracy: 0.7308 - val_loss: 1.8653 - val_accuracy: 0.5964
Epoch 18/100
313/313 - 6s - loss: 1.7190 - accuracy: 0.7422 - val_loss: 1.8458 - val_accuracy: 0.6122
Epoch 19/100
313/313 - 6s - loss: 1.7134 - accuracy: 0.7481 - val_loss: 1.8573 - val_accuracy: