TensorFlow Keras 之 LeNet/AlexNet/VGG16 网络结构

23 篇文章 0 订阅
20 篇文章 0 订阅

目录

1. LeNet

2. Alexnet

3.VGG16

1. LeNet

from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D
from tensorflow.keras import Model

class LeNet5(Model):
    """LeNet-5: two conv/pool stages followed by a three-layer dense head.

    Expects image batches as input; produces a 10-way softmax distribution.
    """

    def __init__(self):
        super(LeNet5, self).__init__()
        # Stage 1: 6 feature maps with a 5x5 receptive field, then 2x2 pooling.
        self.c1 = Conv2D(filters=6, kernel_size=(5, 5), activation='sigmoid')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2)
        # Stage 2: 16 feature maps, same kernel/pool configuration.
        self.c2 = Conv2D(filters=16, kernel_size=(5, 5), activation='sigmoid')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2)
        # Classifier head: flatten, two sigmoid hidden layers, softmax output.
        self.flatten = Flatten()
        self.f1 = Dense(120, activation='sigmoid')
        self.f2 = Dense(84, activation='sigmoid')
        self.f3 = Dense(10, activation='softmax')

    def call(self, x):
        """Forward pass: return class probabilities for input batch *x*."""
        pipeline = (self.c1, self.p1, self.c2, self.p2,
                    self.flatten, self.f1, self.f2, self.f3)
        for layer in pipeline:
            x = layer(x)
        return x

2. AlexNet

from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, BatchNormalization, Activation, Dropout
from tensorflow.keras import Model

class AlexNet8(Model):
    """AlexNet-style network: 5 conv layers + 3 dense layers, 10-class softmax.

    The first two conv layers use BatchNormalization + ReLU + 3x3 max pooling;
    the last three conv layers use 'same' padding with inline ReLU.
    """

    def __init__(self):
        super(AlexNet8, self).__init__()
        # Stage 1: conv -> batch norm -> ReLU -> pool.
        self.c1 = Conv2D(filters=96, kernel_size=(3, 3))
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        # BUGFIX: the MaxPool2D keyword is 'strides', not 'stride'
        # (the original raised TypeError: unexpected keyword argument).
        self.p1 = MaxPool2D(pool_size=(3, 3), strides=2)

        # Stage 2: conv -> batch norm -> ReLU -> pool.
        self.c2 = Conv2D(filters=256, kernel_size=(3, 3))
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(3, 3), strides=2)

        # Stage 3: three 'same'-padded conv layers with inline ReLU.
        self.c3 = Conv2D(filters=384, kernel_size=(3, 3), padding='same',
                         activation='relu')
        self.c4 = Conv2D(filters=384, kernel_size=(3, 3), padding='same',
                         activation='relu')
        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same',
                         activation='relu')
        self.p3 = MaxPool2D(pool_size=(3, 3), strides=2)

        # Dense head: two dropout-regularized hidden layers + softmax output.
        self.flatten = Flatten()
        self.f1 = Dense(2048, activation='relu')
        self.d1 = Dropout(0.5)
        self.f2 = Dense(2048, activation='relu')
        self.d2 = Dropout(0.5)
        self.f3 = Dense(10, activation='softmax')

    def call(self, x):
        """Forward pass: return class probabilities for input batch *x*."""
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)
        x = self.p1(x)

        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p2(x)

        x = self.c3(x)
        x = self.c4(x)
        x = self.c5(x)
        x = self.p3(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.d1(x)
        x = self.f2(x)
        x = self.d2(x)
        y = self.f3(x)

        return y

3. VGG16

from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, BatchNormalization, Activation, Dropout
from tensorflow.keras import Model

class VGG(Model):
    """VGG16-style network: 13 conv layers (5 stages) + 3 dense layers.

    Each conv layer is conv -> BatchNormalization -> ReLU; each stage ends
    with 2x2 max pooling and Dropout(0.2). Output is a 10-class softmax.
    """

    def __init__(self):
        # BUGFIX: super().__init__() was missing — Keras raises a RuntimeError
        # on the first layer-attribute assignment without it.
        super(VGG, self).__init__()

        # Stage 1: two 64-filter conv layers.
        self.c1 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')

        self.c2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        # BUGFIX: MaxPool2D takes 'pool_size', not 'kernel_size'
        # (fixed in every pooling layer below as well).
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.d2 = Dropout(0.2)

        # Stage 2: two 128-filter conv layers.
        self.c3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b3 = BatchNormalization()
        self.a3 = Activation('relu')

        self.c4 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b4 = BatchNormalization()
        self.a4 = Activation('relu')
        self.p4 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.d4 = Dropout(0.2)

        # Stage 3: three 256-filter conv layers.
        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b5 = BatchNormalization()
        self.a5 = Activation('relu')

        self.c6 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b6 = BatchNormalization()
        self.a6 = Activation('relu')

        self.c7 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b7 = BatchNormalization()
        self.a7 = Activation('relu')
        self.p7 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.d7 = Dropout(0.2)

        # Stage 4: three conv layers.
        self.c8 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b8 = BatchNormalization()
        self.a8 = Activation('relu')

        self.c9 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b9 = BatchNormalization()
        self.a9 = Activation('relu')

        # NOTE(review): canonical VGG16 uses 512 filters for all three convs
        # of stage 4; 256 here looks like a typo in the source article.
        # Kept as-is to preserve the published architecture — confirm intent.
        self.c10 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b10 = BatchNormalization()
        self.a10 = Activation('relu')
        self.p10 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.d10 = Dropout(0.2)

        # Stage 5: three 512-filter conv layers.
        self.c11 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b11 = BatchNormalization()
        self.a11 = Activation('relu')

        self.c12 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b12 = BatchNormalization()
        self.a12 = Activation('relu')

        self.c13 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b13 = BatchNormalization()
        self.a13 = Activation('relu')
        self.p13 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.d13 = Dropout(0.2)

        # BUGFIX: Flatten() was missing, so the dense head would have been
        # applied per spatial location of the 4-D feature map instead of on
        # the flattened feature vector.
        self.flatten = Flatten()

        # Dense head: two dropout-regularized hidden layers + softmax output.
        self.f14 = Dense(512, activation='relu')
        self.d14 = Dropout(0.2)

        self.f15 = Dense(512, activation='relu')
        self.d15 = Dropout(0.2)

        self.f16 = Dense(10, activation='softmax')

    def call(self, x):
        """Forward pass: return class probabilities for input batch *x*."""
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)

        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p2(x)
        x = self.d2(x)

        x = self.c3(x)
        x = self.b3(x)
        x = self.a3(x)

        x = self.c4(x)
        x = self.b4(x)
        x = self.a4(x)
        x = self.p4(x)
        x = self.d4(x)

        x = self.c5(x)
        x = self.b5(x)
        x = self.a5(x)

        x = self.c6(x)
        x = self.b6(x)
        x = self.a6(x)

        x = self.c7(x)
        x = self.b7(x)
        x = self.a7(x)
        x = self.p7(x)
        x = self.d7(x)

        x = self.c8(x)
        x = self.b8(x)
        x = self.a8(x)

        x = self.c9(x)
        x = self.b9(x)
        x = self.a9(x)

        x = self.c10(x)
        x = self.b10(x)
        x = self.a10(x)
        x = self.p10(x)
        x = self.d10(x)

        x = self.c11(x)
        x = self.b11(x)
        x = self.a11(x)

        x = self.c12(x)
        x = self.b12(x)
        x = self.a12(x)

        x = self.c13(x)
        x = self.b13(x)
        x = self.a13(x)
        x = self.p13(x)
        x = self.d13(x)

        x = self.flatten(x)
        x = self.f14(x)
        x = self.d14(x)

        x = self.f15(x)
        x = self.d15(x)

        y = self.f16(x)

        # BUGFIX: the original call() never returned y (implicitly None).
        return y

 

  • 1
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
针对飞行员脑电数据进行CNN-LSTM模型进行分类时,选择模型结构的原因是为了提高分类的准确率和泛化能力。这些经典的模型结构都是在不同的数据集上进行了大量的实验和优化,具有较高的分类性能和较强的泛化能力,可以作为参考模型用于该任务。 LeNetAlexNet、VGG、ResNet等模型结构选择的具体原因如下: - LeNetLeNet是一个较早的卷积神经网络模型,适用于对小图像进行分类。在飞行员脑电数据分类任务中,LeNet可以用于提取输入数据的局部特征,从而提高分类准确率。 - AlexNetAlexNet是一个较早的深度卷积神经网络模型,具有较好的分类性能。在飞行员脑电数据分类任务中,AlexNet可以通过多层卷积和池化层提取输入数据的特征,从而提高分类准确率。 - VGG:VGG是一个非常深的卷积神经网络模型,具有较强的泛化能力。在飞行员脑电数据分类任务中,VGG可以通过多层卷积和池化层提取输入数据的特征,从而提高分类准确率。 - ResNet:ResNet是一个残差神经网络模型,具有较强的泛化能力和防止梯度消失的能力。在飞行员脑电数据分类任务中,ResNet可以通过残差模块提取输入数据的特征,从而提高分类准确率。 以下是一个使用TensorFlow实现VGG模型的代码示例: ```python # 导入相关库 import tensorflow as tf # 定义VGG模型 model = tf.keras.Sequential([ # 第一段卷积层 tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu', input_shape=input_shape), tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)), # 第二段卷积层 tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)), # 第三段卷积层 tf.keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)), # 第四段卷积层 tf.keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)), # 第五段卷积层 tf.keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=512, 
kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)), # 全连接层 tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=4096, activation='relu'), tf.keras.layers.Dense(units=4096, activation='relu'), tf.keras.layers.Dense(units=num_classes, activation='softmax') ]) # 编译模型 model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # 训练模型 model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test)) # 评估模型 score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) ``` 此外,我还找到一个相应的视频,其中讲解了如何使用TensorFlowKeras构建LeNetAlexNet、VGG、ResNet等模型进行图像分类。您可以在以下链接中查看该视频:https://www.youtube.com/watch?v=mQGwjrStQgg。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值