keras实现人脸关键点检测

原文:https://www.bbsmax.com/A/MAzAjNORJ9/

数据集:https://pan.baidu.com/s/1cnAxJJmN9nQUVYj8w0WocA

第一步:准备好需要的库

  • tensorflow  1.4.0
  • h5py 2.7.0
  • hdf5 1.8.15.1
  • Keras     2.0.8
  • opencv-python     3.3.0
  • numpy    1.13.3+mkl

第二步:准备数据集:

我对每一张图像进行了剪裁,使图像的大小为178*178的正方形。

并且对于原有的label进行了优化

第三步:将图片和标签转成numpy array格式:

参数

# --- training configuration ---
trainpath = 'E:/pycode/facial-keypoints-master/data/50000train/'  # training images + label file
testpath = 'E:/pycode/facial-keypoints-master/data/50000test/'    # validation images + label file
imgsize = 178        # square side length of the cropped input images
train_samples =40000  # number of training samples (used to derive steps per epoch)
test_samples = 200    # number of validation samples
batch_size = 32       # batch size used for step-count arithmetic
def __data_label__(path):
     """Endless generator of (image, label) pairs for fit_generator.

     Reads ``lable-40.txt`` in *path* (CSV: filename, then 10 coordinates),
     loads each image, and yields a ``(1, H, W, 3)`` float32 image together
     with a ``(1, 10)`` float32 label row. Loops over the file forever.
     """
     f = open(path + "lable-40.txt", "r")
     while True:
         # Fix: rewind each pass -- readlines() exhausts the file handle, so
         # without seek(0) the second epoch would yield nothing and training
         # would hang waiting on the generator.
         f.seek(0)
         for line in f.readlines():
             fields = line.replace("\n", "").split(",")
             imgname = path + fields[0]
             images = load_img(imgname)
             images = img_to_array(images).astype('float32')
             # Add the batch dimension expected by Keras.
             image = np.expand_dims(images, axis=0)
             # Fix: cast labels to float32 -- np.array over raw CSV fields
             # produces a string array, which Keras cannot train against.
             lable = np.array(fields[1:], dtype='float32').reshape(1, 10)
             yield (image, lable)

第四步:搭建网络:

这里使用非常简单的网络

     def __CNN__(self):
         """Build the plain regression CNN.

         Three conv/ReLU/max-pool stages followed by a small dense head
         with 10 linear outputs (regression -> no softmax).
         """
         model = Sequential()  # input: 178*178*3
         # First stage fixes the input shape; the remaining stages are identical
         # except for the filter count, so they are added in a loop.
         model.add(Conv2D(32, (3, 3), input_shape=(imgsize, imgsize, 3)))
         model.add(Activation('relu'))
         model.add(MaxPooling2D(pool_size=(2, 2)))

         for filters in (32, 64):
             model.add(Conv2D(filters, (3, 3)))
             model.add(Activation('relu'))
             model.add(MaxPooling2D(pool_size=(2, 2)))

         model.add(Flatten())
         model.add(Dense(64))
         model.add(Activation('relu'))
         model.add(Dropout(0.5))
         model.add(Dense(10))  # 10 keypoint coordinates, linear activation
         return model
 # Regression task, so the softmax layer is omitted.

_________________________________________________________________
Layer (type) Output Shape Param # 
=================================================================
conv2d_1 (Conv2D) (None, 176, 176, 32) 896 
_________________________________________________________________
activation_1 (Activation) (None, 176, 176, 32) 0 
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 88, 88, 32) 0 
_________________________________________________________________
conv2d_2 (Conv2D) (None, 86, 86, 32) 9248 
_________________________________________________________________
activation_2 (Activation) (None, 86, 86, 32) 0 
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 43, 43, 32) 0 
_________________________________________________________________
conv2d_3 (Conv2D) (None, 41, 41, 64) 18496 
_________________________________________________________________
activation_3 (Activation) (None, 41, 41, 64) 0 
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 20, 20, 64) 0 
_________________________________________________________________
flatten_1 (Flatten) (None, 25600) 0 
_________________________________________________________________
dense_1 (Dense) (None, 64) 1638464 
_________________________________________________________________
activation_4 (Activation) (None, 64) 0 
_________________________________________________________________
dropout_1 (Dropout) (None, 64) 0 
_________________________________________________________________
dense_2 (Dense) (None, 10) 650 
=================================================================
Total params: 1,667,754
Trainable params: 1,667,754
Non-trainable params: 0
_________________________________________________________________

第五步:训练网络:

def train(model):
     """Compile *model* and train it from the label-file generators.

     Uses mse loss (regression), a linearly decaying learning rate,
     early stopping on val_loss, and checkpointing of the best weights.
     """
     model.compile(loss='mse', optimizer='adam')
     epoch_num = 14
     # Linear decay 0.03 -> 0.01, one value per epoch.
     learning_rate = np.linspace(0.03, 0.01, epoch_num)
     change_lr = LearningRateScheduler(lambda epoch: float(learning_rate[epoch]))
     early_stop = EarlyStopping(monitor='val_loss', patience=20, verbose=1, mode='auto')
     check_point = ModelCheckpoint('CNN_model_final.h5', monitor='val_loss', verbose=0,
                                   save_best_only=True, save_weights_only=False,
                                   mode='auto', period=1)

     # Fix: Keras 2's fit_generator takes steps_per_epoch (batches per epoch);
     # samples_per_epoch was the Keras 1 name and counted samples, not batches,
     # so the old kwarg mixed APIs and under-ran each epoch.
     model.fit_generator(__data_label__(trainpath),
                         steps_per_epoch=train_samples // batch_size,
                         epochs=epoch_num,
                         callbacks=[check_point, early_stop, change_lr],
                         validation_data=__data_label__(testpath),
                         validation_steps=test_samples // batch_size)

     model.evaluate_generator(__data_label__(testpath), steps=10)
 
 def save(model, file_path=FILE_PATH):
     """Persist the model's weights to *file_path*."""
     model.save_weights(file_path)
     # Fix: report success only after the write, so the message is not
     # printed when save_weights raises.
     print('Model Saved.')
 
 def predict(model,image):
     """Predict the 10 keypoint coordinates for one image.

     Resizes to the network input size, normalizes to [0, 1], and
     de-normalizes the network output (result*1000+20) back to pixel space.
     """
     image = cv2.resize(image, (imgsize, imgsize))
     # Fix: astype returns a new array -- the original discarded it, leaving a
     # uint8 array on which the in-place /= 255 division fails.
     image = image.astype('float32')
     image /= 255

     # Fix: add the batch dimension Keras expects (matches the class version).
     image = np.expand_dims(image, axis=0)
     result = model.predict(image)
     result = result*1000+20

     print(result)
     return result

使用了fit_generator这一方法,加入了learning_rate,LearningRateScheduler,early_stop等参数。

第六步:图像验证

 # Step 6: visual validation -- load the trained weights, run one test image
 # through the network, and draw the five predicted keypoints on it.
 import tes_main
 from keras.preprocessing.image import load_img, img_to_array
 import numpy as np
 import cv2
 FILE_PATH = 'E:\\pycode\\facial-keypoints-master\\code\\CNN_model_final.h5'
 imgsize =178
 def point(img,x, y):
     # Draw one keypoint as a thick red dot (BGR (0, 0, 255)) on *img*.
     cv2.circle(img, (x, y), 1, (0, 0, 255), 10)
 
 Model = tes_main.Model()
 model = Model.__CNN__()
 Model.load(model,FILE_PATH)
 img = []
 # path = "D:\\Users\\a\\Pictures\\face_landmark_data\data\\test\\000803.jpg"
 path = "E:\pycode\\facial-keypoints-master\data\\50000test\\049971.jpg"
 # image = load_img(path)
 # img.append(img_to_array(image))
 # img_data = np.array(img)
 imgs = cv2.imread(path)
 # img_datas = np.reshape(imgs,(imgsize, imgsize,3))
 image = cv2.resize(imgs, (imgsize, imgsize))
 rects = Model.predict(model,imgs)
 
 # Each prediction row holds five (x, y) pairs.
 # NOTE(review): model output is float; cv2.circle expects int coordinates --
 # confirm whether rounding is needed for this cv2 version.
 for x, y, w, h, a,b,c,d,e,f in rects:
     point(image,x,y)
     point(image,w, h)
     point(image,a,b)
     point(image,c,d)
     point(image,e,f)
 
 cv2.imshow('img', image)
 cv2.waitKey(0)
 cv2.destroyAllWindows()

完整代码如下

 from tensorflow.contrib.keras.api.keras.preprocessing.image import ImageDataGenerator,img_to_array
 from keras.models import Sequential
 from keras.layers.core import Dense, Dropout, Activation, Flatten
 from keras.layers.advanced_activations import PReLU
 from keras.layers.convolutional import Conv2D, MaxPooling2D,ZeroPadding2D
 from keras.preprocessing.image import load_img, img_to_array
 from keras.optimizers import  SGD
 import numpy as np
 import cv2
 from keras.callbacks import *
 import keras
 
 # --- paths and training configuration ---
 FILE_PATH = 'E:\\pycode\\facial-keypoints-master\\code\\CNN_model_final.h5'  # saved weights
 trainpath = 'E:/pycode/facial-keypoints-master/data/50000train/'  # training data dir
 testpath = 'E:/pycode/facial-keypoints-master/data/50000test/'    # validation data dir
 imgsize = 178         # square side length of the network input
 train_samples =40000  # training sample count (drives steps per epoch)
 test_samples = 200    # validation sample count
 batch_size = 32       # batch size used for step-count arithmetic
 def __data_label__(path):
     """Endless generator of (image, label) pairs for fit_generator.

     Reads ``lable-40.txt`` in *path* (CSV: filename, then 10 coordinates),
     loads each image, and yields a ``(1, H, W, 3)`` float32 image together
     with a ``(1, 10)`` float32 label row. Loops over the file forever.
     """
     f = open(path + "lable-40.txt", "r")
     while True:
         # Fix: rewind each pass -- readlines() exhausts the file handle, so
         # without seek(0) the second epoch would yield nothing and training
         # would hang waiting on the generator.
         f.seek(0)
         for line in f.readlines():
             fields = line.replace("\n", "").split(",")
             imgname = path + fields[0]
             images = load_img(imgname)
             images = img_to_array(images).astype('float32')
             # Add the batch dimension expected by Keras.
             image = np.expand_dims(images, axis=0)
             # Fix: cast labels to float32 -- np.array over raw CSV fields
             # produces a string array, which Keras cannot train against.
             lable = np.array(fields[1:], dtype='float32').reshape(1, 10)
             yield (image, lable)
 
 ###############:
 
 # 开始建立CNN模型
 ###############
 
 # 生成一个model
 class Model(object):
     """Bundles model construction, training, persistence and inference."""

     def __CNN__(self):
         """Build the regression CNN: three conv/pool stages, dense head of 10 linear outputs."""
         model = Sequential()  # input: 178*178*3
         model.add(Conv2D(32, (3, 3), input_shape=(imgsize, imgsize, 3)))
         model.add(Activation('relu'))
         model.add(MaxPooling2D(pool_size=(2, 2)))
 
         model.add(Conv2D(32, (3, 3)))
         model.add(Activation('relu'))
         model.add(MaxPooling2D(pool_size=(2, 2)))
 
         model.add(Conv2D(64, (3, 3)))
         model.add(Activation('relu'))
         model.add(MaxPooling2D(pool_size=(2, 2)))
 
         model.add(Flatten())
         model.add(Dense(64))
         model.add(Activation('relu'))
         model.add(Dropout(0.5))
         model.add(Dense(10))  # regression output: 10 coordinates, no softmax
         model.summary()
         return model
 
     def train(self, model):
         """Compile and fit *model* from the label-file generators, keeping the best weights."""
         # NOTE(review): 'accuracy' is not meaningful for mse regression; kept
         # only because the original tracked it.
         model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
         epoch_num = 10
         # Linear learning-rate decay 0.03 -> 0.01, one value per epoch.
         learning_rate = np.linspace(0.03, 0.01, epoch_num)
         change_lr = LearningRateScheduler(lambda epoch: float(learning_rate[epoch]))
         early_stop = EarlyStopping(monitor='val_loss', patience=20, verbose=1, mode='auto')
         check_point = ModelCheckpoint('CNN_model_final.h5', monitor='val_loss', verbose=0,
                                       save_best_only=True, save_weights_only=False,
                                       mode='auto', period=1)
 
         # Fix: Keras 2's fit_generator takes steps_per_epoch (batches per
         # epoch); samples_per_epoch was the Keras 1 name and mixed APIs.
         model.fit_generator(__data_label__(trainpath),
                             steps_per_epoch=train_samples // batch_size,
                             epochs=epoch_num,
                             callbacks=[check_point, early_stop, change_lr],
                             validation_data=__data_label__(testpath),
                             validation_steps=test_samples // batch_size)
 
         # Fix: evaluating an endless generator requires an explicit steps
         # count, otherwise Keras 2 raises / never terminates.
         model.evaluate_generator(__data_label__(testpath), steps=test_samples // batch_size)
 
     def save(self, model, file_path=FILE_PATH):
         """Persist the model's weights to *file_path*."""
         model.save_weights(file_path)
         # Fix: report success only after the write succeeds.
         print('Model Saved.')
 
     def load(self, model, file_path=FILE_PATH):
         """Load previously saved weights from *file_path* into *model*."""
         model.load_weights(file_path)
         # Fix: report only after the load succeeds.
         print('Model Loaded.')
 
     def predict(self, model, image):
         """Predict the 10 keypoint coordinates for one image of any size."""
         print(image.shape)
         image = cv2.resize(image, (imgsize, imgsize))
         # Fix: astype returns a new array -- the original discarded the
         # result, so the network received uint8 data.
         image = image.astype('float32')
         image = np.expand_dims(image, axis=0)  # batch dimension for Keras
 
         result = model.predict(image)
 
         print(result)
         return result

 

  • 1
    点赞
  • 7
    收藏
    觉得还不错? 一键收藏
  • 2
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值