1DCNN 2DCNN LeNet5,VGGNet16使用tensorflow2.X实现

1DCNN是1维卷积
2DCNN是两层卷积,+池化层
leNet5是两段卷积层+池化层,最后加三层全连接层
VGGNet16总共分为八段:五段卷积+池化块,加三层全连接层。
(原文此处为架构示意图,抓取时图片已丢失。)

from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow import keras

def LeNet_CNNmodel():
    """Build and compile a small LeNet-style CNN.

    Input: 16x16 single-channel images. Output: softmax over 12 classes.
    Compiled with Adam and sparse categorical cross-entropy (integer labels).
    """
    net = Sequential()
    # Two conv + max-pool stages, mirroring the classic LeNet layout.
    net.add(layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same',
                          input_shape=(16, 16, 1), activation='relu'))
    net.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
    net.add(layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same',
                          activation='relu'))
    net.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
    # Classifier head: flatten then two hidden dense layers.
    net.add(layers.Flatten())
    net.add(layers.Dense(256, activation='relu'))
    net.add(layers.Dense(128, activation='relu'))
    net.add(layers.Dense(12, activation='softmax'))
    # Labels are integer class ids -> sparse variant of the loss.
    net.compile(loss="sparse_categorical_crossentropy", optimizer='adam',
                metrics=['accuracy'])
    return net
#,class_weight=class_weight
def LeNet_CNN():
    """Train and evaluate the LeNet-style CNN.

    Reads the module-level globals train_x/train_y/test_x/test_y
    (flat feature rows, reshaped here to 16x16x1 images).

    Returns:
        (scores, pred_y): scores = [loss, accuracy] on the test set,
        pred_y = predicted class probabilities for the test set.
    """
    t1 = time.time()
    model = LeNet_CNNmodel()
    # Reshape flat 256-feature rows into 16x16 single-channel images.
    X_train = tf.reshape(train_x, [-1, 16, 16, 1])
    X_test = tf.reshape(test_x, [-1, 16, 16, 1])
    model.summary()
    # BUG FIX: `nb_epoch` is the Keras 1.x keyword; tf.keras requires `epochs`.
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128, verbose=2)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y
#simple_CNN()

def oneD_cNNmodel():
    """Build and compile a 1-D CNN for sequence input of shape (32, 8).

    Two Conv1D layers (50 filters, kernel 7) with global average pooling,
    then a 12-way softmax. Compiled with Adam and sparse categorical
    cross-entropy (integer labels).
    """
    model = keras.models.Sequential([
        layers.Conv1D(50, 7, input_shape=(32, 8), activation='relu'),
        layers.MaxPooling1D(3),
        # FIX: dropped the redundant `input_shape` here — Keras silently
        # ignores it on any layer after the first, so it was misleading.
        layers.Conv1D(50, 7, activation='relu'),
        layers.GlobalAveragePooling1D(),
        layers.Dense(12, activation='softmax')
    ])
    model.compile(loss="sparse_categorical_crossentropy", optimizer='adam',
                  metrics=['accuracy'])
    return model

def oneD_cNN():
    """Train and evaluate the 1-D CNN.

    Reads the module-level globals train_x/train_y/test_x/test_y
    (flat feature rows, reshaped here to (32, 8) sequences).

    Returns:
        (scores, pred_y): scores = [loss, accuracy] on the test set,
        pred_y = predicted class probabilities for the test set.
    """
    t1 = time.time()
    model = oneD_cNNmodel()
    # Reshape flat 256-feature rows into 32-step, 8-channel sequences.
    X_train = tf.reshape(train_x, [-1, 32, 8])
    X_test = tf.reshape(test_x, [-1, 32, 8])
    model.summary()
    # BUG FIX: `nb_epoch` is the Keras 1.x keyword; tf.keras requires `epochs`.
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y

def two_CNNmodel():
    """Build and compile a two-conv-layer CNN with a single pooling stage.

    Input: 16x16 single-channel images. Output: softmax over 12 classes.
    Compiled with Adam and sparse categorical cross-entropy (integer labels).
    """
    net = Sequential()
    # Back-to-back convolutions, then one down-sampling step.
    net.add(layers.Conv2D(64, kernel_size=(3, 3), padding='same',
                          input_shape=(16, 16, 1), activation='relu'))
    net.add(layers.Conv2D(32, kernel_size=(3, 3), padding='same',
                          activation='relu'))
    net.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
    # Classifier head with dropout for regularization.
    net.add(layers.Flatten())
    net.add(layers.Dense(128, activation='relu'))
    net.add(layers.Dropout(0.5))
    net.add(layers.Dense(12, activation='softmax'))
    net.compile(loss="sparse_categorical_crossentropy", optimizer='adam',
                metrics=['accuracy'])
    return net
def twoD_CNN():
    """Train and evaluate the two-conv-layer CNN.

    Reads the module-level globals train_x/train_y/test_x/test_y
    (flat feature rows, reshaped here to 16x16x1 images).

    Returns:
        (scores, pred_y): scores = [loss, accuracy] on the test set,
        pred_y = predicted class probabilities for the test set.
    """
    t1 = time.time()
    model = two_CNNmodel()
    # Reshape flat 256-feature rows into 16x16 single-channel images.
    X_train = tf.reshape(train_x, [-1, 16, 16, 1])
    X_test = tf.reshape(test_x, [-1, 16, 16, 1])
    model.summary()
    # BUG FIX: `nb_epoch` is the Keras 1.x keyword; tf.keras requires `epochs`.
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128, verbose=2)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y


def VGGNet16_model():
    """Build and compile a VGG16-style CNN adapted to 16x16x1 inputs.

    Five conv blocks (64, 128, 256, 512, 512 filters; 2-2-3-3-3 conv layers
    each, every block ending in 2x2 max pooling), followed by a three-layer
    dense head with a 12-way softmax. Compiled with Adam and sparse
    categorical cross-entropy (integer labels).
    """
    net = Sequential()
    # Block 1 carries the input_shape declaration.
    net.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',
                          input_shape=(16, 16, 1)))
    net.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
    net.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
    # Blocks 2-5: (filter count, number of conv layers) per block.
    for n_filters, n_convs in ((128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(n_convs):
            net.add(layers.Conv2D(n_filters, (3, 3), activation='relu',
                                  padding='same'))
        net.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
    # Classifier head.
    net.add(layers.Flatten())
    net.add(layers.Dense(256, activation='relu'))
    net.add(layers.Dense(128, activation='relu'))
    net.add(layers.Dense(12, activation='softmax'))
    net.compile(loss="sparse_categorical_crossentropy", optimizer='adam',
                metrics=['accuracy'])
    return net

def VGGNet16():
    """Train and evaluate the VGG16-style CNN.

    Reads the module-level globals train_x/train_y/test_x/test_y
    (flat feature rows, reshaped here to 16x16x1 images).

    Returns:
        (scores, pred_y): scores = [loss, accuracy] on the test set,
        pred_y = predicted class probabilities for the test set.
    """
    t1 = time.time()
    # BUG FIX: was `VGG16_Model()` — a NameError; the builder defined in
    # this file is `VGGNet16_model`.
    model = VGGNet16_model()
    # Reshape flat 256-feature rows into 16x16 single-channel images.
    X_train = tf.reshape(train_x, [-1, 16, 16, 1])
    X_test = tf.reshape(test_x, [-1, 16, 16, 1])
    model.summary()
    # BUG FIX: `nb_epoch` is the Keras 1.x keyword; tf.keras requires `epochs`.
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128, verbose=2)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y

  • 0
    点赞
  • 6
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值