keras实现AlexNet、vgg13、vgg16

没有微调参数,训练睁闭眼的效果差。

数据集结构如第一篇文章(keras实现LeNet5)。

1.model.py   构造了AlexNet、vgg13、vgg16网络

#coding=utf-8
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, BatchNormalization
from keras.layers import *
from keras.layers.advanced_activations import LeakyReLU,PReLU
from keras.models import Model
def keras_batchnormalization_relu(layer):
    """Apply batch normalization followed by a PReLU activation to *layer*.

    Args:
        layer: A Keras tensor (output of a previous layer).

    Returns:
        The tensor after BatchNormalization and PReLU.
    """
    normalized = BatchNormalization()(layer)
    return PReLU()(normalized)

def AlexNet(resize=227, classes=2):
    """Build an AlexNet-style Sequential model.

    Args:
        resize: Side length of the square RGB input image. Defaults to 227,
            the canonical AlexNet input size. (The original ignored this
            parameter and hard-coded 227; it is now honored.)
        classes: Number of output classes for the final softmax layer.

    Returns:
        An uncompiled keras Sequential model.
    """
    model = Sequential()
    # Stage 1: 11x11/4 conv -> BN -> 3x3/2 max-pool
    model.add(Conv2D(filters=96, kernel_size=(11, 11),
                     strides=(4, 4), padding='valid',
                     # Use the resize parameter instead of a hard-coded 227
                     # so other input sizes work too (backward compatible:
                     # the default reproduces the original 227x227x3).
                     input_shape=(resize, resize, 3),
                     activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(3, 3),
                           strides=(2, 2),
                           padding='valid'))
    # Stage 2: 5x5 conv -> BN -> 3x3/2 max-pool
    model.add(Conv2D(filters=256, kernel_size=(5, 5),
                     strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(3, 3),
                           strides=(2, 2),
                           padding='valid'))
    # Stage 3: three 3x3 convs -> 3x3/2 max-pool
    model.add(Conv2D(filters=384, kernel_size=(3, 3),
                     strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(Conv2D(filters=384, kernel_size=(3, 3),
                     strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3, 3),
                     strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3),
                           strides=(2, 2), padding='valid'))
    # Stage 4: fully connected classifier with dropout regularization
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(1000, activation='relu'))
    model.add(Dropout(0.5))

    # Output layer: softmax over the requested number of classes
    model.add(Dense(classes, activation='softmax'))

    return model

def vgg13(resize=224, classes=2, prob=0.5):
    """Build a VGG13 Sequential model (10 conv layers + 3 dense layers).

    Args:
        resize: Side length of the square RGB input image. Defaults to 224,
            the canonical VGG input size. (The original ignored this
            parameter and hard-coded 224; it is now honored.)
        classes: Number of output classes for the final softmax layer.
        prob: Dropout rate applied after each 4096-unit dense layer.

    Returns:
        An uncompiled keras Sequential model.
    """
    model = Sequential()
    # Block 1: 2 x conv64 -> pool
    model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(resize, resize, 3), padding='same', activation='relu',
                     kernel_initializer='uniform'))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 2: 2 x conv128 -> pool
    # Bug fix: the first conv here used kernel_size (3, 2); VGG uses 3x3
    # kernels throughout.
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 3: 2 x conv256 -> pool
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 4: 2 x conv512 -> pool
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 5: 2 x conv512 -> pool
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Classifier head
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(prob))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(prob))
    model.add(Dense(classes, activation='softmax'))
    return model

def vgg16(resize=224, classes=2, prob=0.5):
    """Build a VGG16 Sequential model (13 conv layers + 3 dense layers).

    Args:
        resize: Intended input side length. NOTE(review): currently unused —
            callers in this file pass a Keras Input tensor as this positional
            argument, so the input shape stays hard-coded at 224x224x3.
            Wire it into input_shape only after fixing those call sites.
        classes: Number of output classes for the final softmax layer.
        prob: Dropout rate applied after each 4096-unit dense layer.

    Returns:
        An uncompiled keras Sequential model.
    """
    model = Sequential()
    # Block 1: 2 x conv64 -> pool
    model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(224, 224, 3), padding='same', activation='relu',
                     kernel_initializer='uniform'))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 2: 2 x conv128 -> pool
    # Bug fix: the first conv here used kernel_size (3, 2); VGG uses 3x3
    # kernels throughout.
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 3: 3 x conv256 -> pool
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 4: 3 x conv512 -> pool
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 5: 3 x conv512 -> pool
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Classifier head
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(prob))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(prob))
    model.add(Dense(classes, activation='softmax'))
    return model

2.train.py   训练睁闭眼数据

import numpy as np
import keras
from keras.layers import Dense,Flatten,Dropout
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
import model
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import random
import cv2
import os
from keras.layers import Input, Dense

def one_hot(data, num_classes):
    """Return the one-hot encoding of the integer label array *data*.

    The labels are flattened first; a single label therefore comes back as a
    1-D vector of length *num_classes* (squeeze drops the batch axis).
    """
    identity = np.eye(num_classes)
    encoded = identity[data.reshape(-1)]
    return np.squeeze(encoded)

def load_dataset1(path, enhance_label):
    """Load images and one-hot labels listed in ``label-shuffle.txt``.

    Args:
        path: Directory containing the images and the label file. Each line
            of ``label-shuffle.txt`` is "<filename> ... <int label>"
            (space-separated; first token is the file, last is the label).
        enhance_label: 0 loads raw images resized to 224x224;
            1 loads randomly augmented images via ``random_enhance``.

    Returns:
        Tuple ``(dataset, labels)`` of a stacked image array and the
        two-class one-hot label array.
    """
    dataset = []
    labels = []
    # Use a context manager so the label file is always closed
    # (the original opened it and never closed the handle).
    with open(path + 'label-shuffle.txt', 'r') as f1:
        for line in f1:
            # Parse each line once instead of stripping/splitting twice.
            parts = line.strip().split(' ')
            file_name = parts[0]
            label = int(parts[-1])
            if enhance_label == 0:
                # Raw load: read in color (flag 1) and resize to the
                # 224x224 VGG input size with bicubic interpolation.
                pic = cv2.imread(path + file_name, 1)
                pic = cv2.resize(pic, (224, 224), interpolation=cv2.INTER_CUBIC)
                dataset.append(pic)
                labels.append(label)
            elif enhance_label == 1:
                # NOTE(review): random_enhance is not defined in this file;
                # it must be provided elsewhere before using enhance_label=1.
                pic = random_enhance(path, file_name)
                dataset.append(pic)
                labels.append(label)

    dataset = np.array(dataset)
    labels = np.array(labels)
    labels = one_hot(labels, 2)
    return dataset, labels



# Build the network.
# Fix: vgg16's first parameter is `resize` (an int), not a tensor. The
# original passed a Keras Input tensor positionally, which only avoided an
# error because resize is unused inside vgg16. Also bind the result to
# `net` so the imported `model` module is not shadowed.
net = model.vgg16(resize=224, classes=2)
net.compile(
          optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0),
          metrics=['accuracy'],   # evaluation metric
          loss='categorical_crossentropy')   # categorical cross-entropy (binary_crossentropy would suit a 2-class sigmoid head)

train_path = 'E:/eye_dataset/data2/train/'
test_path = 'E:/eye_dataset/data2/test/'

enhance_label = 0
train_data, train_label = load_dataset1(train_path, enhance_label)
test_data, test_label = load_dataset1(test_path, enhance_label)

# Ensure NHWC shape. NOTE: this only reshapes — it does NOT normalize pixel
# values (the original "# normalize" comments were misleading).
train_data = train_data.reshape(-1, 224, 224, 3)
test_data = test_data.reshape(-1, 224, 224, 3)
print(train_data.shape, train_label.shape)


# Train. validation_split reserves the last 10% of the training data as the
# validation set; validation_data (commented out) would override it.
history = net.fit(
        train_data,
        train_label,
        batch_size=64,
        epochs=2,
        verbose=1,
        shuffle=True,
        initial_epoch=0,   # resume from this epoch index (0 = from scratch)
        validation_split=0.1
        # validation_data=(test_data, test_label)
)

log_dir = "model/"
# Create the output directory if missing so save() cannot fail on a
# nonexistent path.
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
net.save(log_dir + 'm2.h5')          # full model (architecture + weights)
net.save_weights(log_dir + 'm1.h5')  # weights only

3.train2.py  训练cifar数据集

import numpy as np
import keras
from keras.layers import Dense,Flatten,Dropout
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
import model
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import random
import cv2
import os
from keras.layers import Input, Dense

import numpy as np
import os
import keras

#网站给出的读取CIFAR10的函数
def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')

    X = dict[b'data']
    Y = dict[b'labels']
    #将(10000,3072)转变成为图片格式,其中转置函数是为了符合通道在最后的位置
    X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
    Y = np.array(Y)
    return X,Y
 
#输入文件目录,输出数据集的训练集数据和标签,测试集数据和标签
# Given the dataset directory, return train data/labels and test data/labels.
def load_cifar(ROOT):
    """Load the full CIFAR-10 dataset from directory *ROOT*.

    Returns:
        Tuple ``(Xtr, Ytr, Xte, Yte)``: training images/labels concatenated
        from data_batch_1..5, and test images/labels from test_batch.
    """
    batches = [unpickle(os.path.join(ROOT, 'data_batch_%d' % idx))
               for idx in range(1, 6)]
    Xtr = np.concatenate([images for images, _ in batches])
    Ytr = np.concatenate([targets for _, targets in batches])
    Xte, Yte = unpickle(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte

path = 'data2/cifar-10-batches-py/'
num_classes = 10
train_data, train_label, test_data, test_label = load_cifar(path)
# One-hot encode integer labels for categorical_crossentropy.
train_label = keras.utils.to_categorical(train_label, num_classes)
test_label = keras.utils.to_categorical(test_label, num_classes)

print(train_data.shape, train_label.shape)


# Build the network.
# Fix: vgg16's first parameter is `resize` (an int), not a tensor; the
# original passed a Keras Input tensor positionally. Bind the result to
# `net` so the imported `model` module is not shadowed.
# NOTE(review): vgg16 hard-codes a 224x224x3 input, but CIFAR-10 images are
# 32x32x3 — fit() will fail unless the images are upsampled to 224x224 or
# the model's input_shape is changed to match. Confirm the intended fix.
net = model.vgg16(resize=224, classes=num_classes)
net.compile(
          optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0),
          metrics=['accuracy'],   # evaluation metric
          loss='categorical_crossentropy')   # categorical cross-entropy loss

# Train. validation_split reserves the last 10% of the training data as the
# validation set; validation_data (commented out) would override it.
history = net.fit(
        train_data,
        train_label,
        batch_size=64,
        epochs=2,
        verbose=1,
        shuffle=True,
        initial_epoch=0,   # resume from this epoch index (0 = from scratch)
        validation_split=0.1
        # validation_data=(test_data, test_label)
)

log_dir = "model/"
# Create the output directory if missing so save() cannot fail on a
# nonexistent path.
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
net.save(log_dir + 'm2.h5')          # full model (architecture + weights)
net.save_weights(log_dir + 'm1.h5')  # weights only

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值