keras实现LeNet5

检测睁闭眼的准确率:99.23%。

1.数据集

2.train.py

import numpy as np
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import random
import cv2
import os

def one_hot(data, num_classes):
    """Convert an integer label array into a one-hot matrix of width num_classes.

    A single label collapses to a 1-D vector because of the squeeze.
    """
    flat_labels = np.asarray(data).reshape(-1)
    return np.squeeze(np.identity(num_classes)[flat_labels])

def load_dataset1(path, enhance_label):
    """Load grayscale eye images and their one-hot labels from a listing file.

    Reads ``path + 'label-shuffle.txt'`` where each line is
    ``<file_name> ... <label>``.

    Args:
        path: dataset directory (must end with '/'; file names are appended).
        enhance_label: 0 loads raw images resized to 28x28 (cubic);
            1 loads randomly augmented images via random_enhance
            (defined elsewhere in the project).

    Returns:
        (dataset, labels): numpy image array and one-hot label array (2 classes).
        NOTE(review): pixels are NOT scaled to [0, 1] here.
    """
    dataset = []
    labels = []
    # 'with' closes the listing file (the original leaked the handle), and
    # iterating the file object avoids materializing readlines().
    with open(path + 'label-shuffle.txt', 'r') as listing:
        for line in listing:
            parts = line.strip().split(' ')  # parse once instead of twice
            file_name = parts[0]
            label = int(parts[-1])
            if enhance_label == 0:
                # Plain load: grayscale, cubic-resized to the network input size.
                pic = cv2.imread(path + file_name, 0)
                pic = cv2.resize(pic, (28, 28), interpolation=cv2.INTER_CUBIC)
                dataset.append(pic)
                labels.append(label)
            if enhance_label == 1:
                # Augmented load.
                pic = random_enhance(path, file_name)
                dataset.append(pic)
                labels.append(label)

    dataset = np.array(dataset)
    labels = one_hot(np.array(labels), 2)
    return dataset, labels


train_path='data2/train/'
test_path='data2/test/'

# 0: load raw images; 1: load randomly augmented images.
enhance_label=0
train_data,train_label=load_dataset1(train_path,enhance_label)
test_data,test_label=load_dataset1(test_path,enhance_label)

# Add the single grayscale channel: (N, 28, 28) -> (N, 28, 28, 1).
# NOTE(review): despite the original "normalize" comment, pixel values are
# NOT scaled to [0, 1] anywhere -- the network trains on raw 0-255 intensities.
train_data = train_data.reshape(-1, 28, 28, 1)
test_data = test_data.reshape(-1, 28, 28, 1)
print(train_data.shape,train_label.shape)


# Build the LeNet-5-style network: five conv+pool stages, then two dense layers.
model = Sequential()
model.add(Conv2D(input_shape=(28, 28, 1), kernel_size=(5, 5), filters=20, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(2, activation='softmax'))   # two classes: eye open / closed

# Compile once with Adam. The original compiled twice -- first with rmsprop,
# then with Adam; only the last compile takes effect, so the rmsprop call was
# dead code and has been removed.
model.compile(
          optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0),
          metrics=['accuracy'],
          loss='categorical_crossentropy')   # categorical cross-entropy matches the one-hot labels

# Training (method 2): fit with a held-out validation split.
models = model.fit(
        train_data,
        train_label,
        batch_size=64,
        epochs=2,
        verbose=1,
        shuffle=True,
        initial_epoch=0,   # epoch to resume from; earlier training still counts
        validation_split=0.1   # fraction (0~1) of the training data held out for validation
        # validation_data=(test_data, test_label)   # explicit validation set; would override validation_split
)


# # 训练、预测
# print('Training')
# model.fit(train_data, train_label, epochs=50, batch_size=32)

log_dir="model/"
model.save(log_dir+'m2.h5')   # full model (architecture + weights) from the last epoch
model.save_weights(log_dir+'m1.h5')   # weights-only checkpoint

3.test.py

from keras.preprocessing.image import load_img
from keras.models import Sequential
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
from keras.models import load_model
import tensorflow as tf
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import cv2


# Map a prediction to class 0 or 1.
def get_result(pre):
    """Return the winning class index for a softmax prediction of shape (1, 2).

    Class 0 wins only on a strict majority; ties go to class 1.
    """
    return 0 if pre[0][0] > pre[0][1] else 1


# Batch evaluation against a labelled listing file.
def get_acc(model, path):
    """Evaluate model accuracy over the images listed in path + 'label-shuffle.txt'.

    Each listing line is '<file_name> ... <label>'. Images are loaded in
    grayscale with OpenCV, cubic-resized to 28x28 and predicted one at a time.

    Args:
        model: a compiled Keras model expecting (1, 28, 28, 1) input.
        path: dataset directory ending with '/'.

    Returns:
        Fraction of correct predictions (0.0 for an empty listing; the
        original raised ZeroDivisionError in that case).
    """
    n = 0
    total = 0
    # 'with' closes the listing file (the original leaked the handle).
    with open(path + 'label-shuffle.txt', 'r') as listing:
        for line in listing:
            total += 1
            parts = line.strip().split(' ')  # parse once instead of twice
            file_name = parts[0]
            label = int(parts[-1])

            # Load with OpenCV: grayscale, cubic resize, add channel + batch dims.
            # NOTE(review): pixels are NOT scaled to [0, 1]; this matches training.
            img = cv2.imread(path + file_name, 0)
            img = cv2.resize(img, (28, 28), interpolation=cv2.INTER_CUBIC)
            img = img.reshape(28, 28, 1)
            img = np.expand_dims(img, axis=0)

            predictions = model.predict(img)
            result = get_result(predictions)
            print("pre_value:", result,predictions,'---'+str(total))

            if result == label:
                n += 1
    acc = n / total if total else 0.0  # guard against an empty listing
    print("acc:",acc)
    return acc

# Rebuild the network (must match the architecture used in train.py exactly,
# since only weights are loaded below).
model = Sequential()
model.add(Conv2D(input_shape=(28, 28, 1), kernel_size=(5, 5), filters=20, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(2, activation='softmax'))   # two classes: eye open / closed

# Compile once with Adam. The original compiled twice -- first with rmsprop,
# then with Adam; only the last compile takes effect, so the rmsprop call was
# dead code and has been removed. (Compilation is not strictly required for
# inference, but is kept to mirror train.py.)
model.compile(
          optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0),
          metrics=['accuracy'],
          loss='categorical_crossentropy')   # categorical cross-entropy matches the one-hot labels



# Restore the trained weights.
# NOTE(review): train.py wrote the full model to m2.h5 (model.save) and the
# weights-only file to m1.h5 (save_weights); presumably load_weights can read
# the full-model file because it also contains the weight tensors -- verify.
model.load_weights('model/m2.h5')

# Batch prediction over the labelled test listing.
path='E:/eye_dataset/test/eye/'   # unused alternative dataset path
path2='data2/test/'
acc=get_acc(model,path2)
print("pre_acc:",acc)

4.transform.py  模型转换

#*-coding:utf-8-*

"""
将keras的.h5的模型文件,转换成TensorFlow的pb文件
"""
# ==========================================================
# from keras.preprocessing.image import load_img
from keras.models import Sequential
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
from keras.models import load_model
import tensorflow as tf
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import cv2
import keras
from keras.models import load_model
import tensorflow as tf
import os
from keras import backend
from keras.applications.mobilenetv2 import MobileNetV2
from keras.layers import Input
from keras.preprocessing import image
from keras.applications.mobilenetv2 import preprocess_input, decode_predictions
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.framework import graph_util, graph_io
from keras.models import Sequential
def h5_to_pb(h5_model, output_dir, model_name, out_prefix="output_", log_tensorboard=True):
    """Freeze a Keras model into a TensorFlow 1.x .pb graph file.

    Args:
        h5_model: Keras model with weights already loaded.
        output_dir: directory the frozen graph is written into (created if missing).
        model_name: file name of the resulting .pb file.
        out_prefix: name prefix for the renamed output nodes ("output_1", ...).
        log_tensorboard: currently unused (the TensorBoard export below is commented out).
    """
    if os.path.exists(output_dir) == False:
        os.mkdir(output_dir)

    out_nodes = []
    for i in range(len(h5_model.outputs)):
        out_nodes.append(out_prefix + str(i + 1))
        # NOTE(review): this indexes h5_model.output (a single tensor), not
        # h5_model.outputs (the list). For this one-output model, output[0]
        # appears to slice off the batch dimension, so the frozen "output_1"
        # tensor is 1-D -- consistent with test_pb.py reading pre[0]/pre[1].
        # Changing this to .outputs[i] would require changing that code too.
        tf.identity(h5_model.output[i], out_prefix + str(i + 1))
    sess = backend.get_session()

    # Freeze: convert variables to constants, then serialize the graph to disk.
    init_graph = sess.graph.as_graph_def()
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)

    # # Optional: export the frozen graph for TensorBoard inspection.
    # if log_tensorboard:
    #     from tensorflow.python.tools import import_pb_to_tensorboard
    #     import_pb_to_tensorboard.import_to_tensorboard(os.path.join(output_dir, model_name), output_dir)


if __name__ == '__main__':
    # Paths for the trained .h5 file and the .pb output.
    input_path = 'model/'
    weight_file = 'm2.h5'
    weight_file_path = os.path.join(input_path, weight_file)

    output_dir = input_path
    output_graph_name = 'test2.pb'


    # # Loading method 1: the original author suspected load_model did not
    # # restore the weights correctly (unverified), so the architecture is
    # # rebuilt below and only the weights are loaded.
    # h5_model = load_model(weight_file_path)
    # Rebuild the network (must match the training architecture exactly).
    model = Sequential()
    model.add(Conv2D(input_shape=(28, 28, 1), kernel_size=(5, 5), filters=20, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

    model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

    model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

    model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

    model.add(Conv2D(kernel_size=(5, 5), filters=50,  activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))

    model.add(Flatten())
    model.add(Dense(1000, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    # Loading method 2: restore weights into the rebuilt architecture.
    model.load_weights(weight_file_path)

    model.summary()
    h5_to_pb(model, output_dir=output_dir, model_name=output_graph_name)
    print('Transform success!')

5.test_pb.py

import tensorflow as tf
import numpy as np
import cv2
import os
from keras.preprocessing.image import load_img

def get_result(pre):
    """Return the winning class index for a 1-D two-class score vector.

    Class 0 wins only on a strict majority; ties go to class 1.
    """
    return 0 if pre[0] > pre[1] else 1

# Load the frozen TF1 graph and merge it into the default graph.
pb_path = 'model/test2.pb'
with tf.gfile.FastGFile(pb_path,'rb') as model_file:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(model_file.read())
    tf.import_graph_def(graph_def, name='')   # name='' keeps the original tensor names
    # print(graph_def)

with tf.Session() as sess:
    # NOTE(review): this builds an init op but never runs it; harmless here
    # since a frozen graph contains constants, not variables.
    tf.global_variables_initializer()
    input_1 = sess.graph.get_tensor_by_name("conv2d_1_input:0")   # network input placeholder
    pred = sess.graph.get_tensor_by_name("output_1:0")            # renamed softmax output node


    n=0
    total=0
    path='E:/eye_dataset/test/eye/'   # unused alternative dataset path
    path2='data2/test/'
    
    f1 = open(path2 + 'label-shuffle.txt', 'r')
    for line in f1.readlines():
        total += 1
        file_name=line.strip().split(' ')[0]     # image file name
        label=int(line.strip().split(' ')[-1])   # ground-truth label (0/1)

        # Load with OpenCV: grayscale, cubic resize to the 28x28 input size.
        img=cv2.imread(path2 + file_name,0)
        img=cv2.resize(img,(28,28), interpolation=cv2.INTER_CUBIC)
        # Add channel and batch dimensions -> (1, 28, 28, 1).
        # NOTE(review): pixels are NOT scaled to [0, 1], matching training.
        img = img.reshape(28, 28, 1)
        # img = image.img_to_array(img) / 255.0
        img = np.expand_dims(img, axis=0)
        img=np.array(img)

        pre = sess.run(pred, feed_dict={input_1: img})
        result = get_result(pre)
        print("pre_value:", result,pre,'---'+str(total))

        if result==label:
            n += 1
    acc=n/total
    print("acc:",acc)

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值