dlib face feature extraction
# Extract face features from face image files and save them into a CSV
# Feature extraction from images, saved into features_all.csv
# return_128d_features(): get the 128-D feature of a single image
# return_features_mean_personX(): compute the mean of one person's 128-D features
import cv2
import os
import dlib
from skimage import io
import csv
import numpy as np
# Path of the face image files to read
path_images_from_camera = "C:/Users/DEDSEC/Smile/files2/test/"
# Dlib frontal face detector
detector = dlib.get_frontal_face_detector()
# Dlib 68-point landmark predictor
predictor = dlib.shape_predictor("D:/shape_predictor_68_face_landmarks.dat")
# Dlib face recognition model
# Face recognition model, the object maps human faces into 128D vectors
face_rec = dlib.face_recognition_model_v1("D:/dlib_face_recognition_resnet_model_v1.dat")
# Return the 128-D feature of a single image
def return_128d_features(path_img):
    img_rd = io.imread(path_img)  # skimage reads the image as RGB
    img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)
    faces = detector(img_gray, 1)
    # A cropped face image may fail re-detection,
    # so only compute features for images in which a face is actually detected
    if len(faces) != 0:
        print("%-40s %-20s" % ("Image with faces detected:", path_img), '\n')
        shape = predictor(img_rd, faces[0])
        face_descriptor = face_rec.compute_face_descriptor(img_rd, shape)
    else:
        face_descriptor = None
        print("no face detected in", path_img)
    return face_descriptor
# Extract the features of all photos in one person's folder and write them into CSVs
def return_features_mean_personX(path_faces_personX):
    features_list_personX = []
    photos_list = os.listdir(path_faces_personX)
    if photos_list:
        for i in range(len(photos_list)):
            # Call return_128d_features() to get the 128-D feature
            print("%-40s %-20s" % ("Image to read:", path_faces_personX + "/" + photos_list[i]))
            features_128d = return_128d_features(path_faces_personX + "/" + photos_list[i])
            # Skip photos in which no face was detected
            if features_128d is None:
                continue
            features_list_personX.append(features_128d)
            # Also write each photo's 128-D feature into its own CSV file
            i1 = str(i + 1)
            add = "D:/myworkspace/JupyterNotebook/Smile/feature/face_feature" + i1 + ".csv"
            print(add)
            with open(add, "w", newline="") as csvfile:
                writer1 = csv.writer(csvfile)
                writer1.writerow(features_128d)
    else:
        print("Warning: no images in " + path_faces_personX + '/', '\n')
    # Compute the mean of the 128-D features: N x 128D -> 1 x 128D
    if features_list_personX:
        features_mean_personX = np.array(features_list_personX).mean(axis=0)
    else:
        # No usable photo: fall back to an all-zero vector so every CSV row stays 128-D
        features_mean_personX = np.zeros(128)
    return features_mean_personX
# Iterate over every person's folder and write each person's mean feature into the overall CSV
people = os.listdir(path_images_from_camera)
people.sort()
with open("D:/myworkspace/JupyterNotebook/Smile/feature/features2_all.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    for person in people:
        print("##### " + person + " #####")
        # Get the mean feature of person X; it is a 128-D vector
        features_mean_personX = return_features_mean_personX(path_images_from_camera + person)
        writer.writerow(features_mean_personX)
        print("The mean of features:", list(features_mean_personX))
        print('\n')
print("Saved all the features of registered faces into: D:/myworkspace/JupyterNotebook/Smile/feature/features2_all.csv")
Testing the trained models on a single image
# use the saved model
import joblib
from smile_dlib_tezhengdian import get_features
import smile_test1
import cv2
# path of test img
path_test_img = "C:/Users/test_nosmile.jpg"
# Extract the 40-D lip feature of the test image
positions_lip_test = get_features(path_test_img)
# path of models
path_models = "C/Smile/data/data_models/"
print("The result of"+path_test_img+":")
print('\n')
# ######### LR ###########
LR = joblib.load(path_models+"model_LR.m")
ss_LR = smile_test1.model_LR()
X_test_LR = ss_LR.transform([positions_lip_test])
y_predict_LR = str(LR.predict(X_test_LR)[0]).replace('0', "no smile").replace('1', "with smile")
print("LR:", y_predict_LR)
# ######### LSVC ###########
LSVC = joblib.load(path_models+"model_LSVC.m")
ss_LSVC = smile_test1.model_LSVC()
X_test_LSVC = ss_LSVC.transform([positions_lip_test])
y_predict_LSVC = str(LSVC.predict(X_test_LSVC)[0]).replace('0', "no smile").replace('1', "with smile")
print("LSVC:", y_predict_LSVC)
# ######### MLPC ###########
MLPC = joblib.load(path_models+"model_MLPC.m")
ss_MLPC = smile_test1.model_MLPC()
X_test_MLPC = ss_MLPC.transform([positions_lip_test])
y_predict_MLPC = str(MLPC.predict(X_test_MLPC)[0]).replace('0', "no smile").replace('1', "with smile")
print("MLPC:", y_predict_MLPC)
# ######### SGDC ###########
SGDC = joblib.load(path_models+"model_SGDC.m")
ss_SGDC = smile_test1.model_SGDC()
X_test_SGDC = ss_SGDC.transform([positions_lip_test])
y_predict_SGDC = str(SGDC.predict(X_test_SGDC)[0]).replace('0', "no smile").replace('1', "with smile")
print("SGDC:", y_predict_SGDC)
img_test = cv2.imread(path_test_img)
img_height = int(img_test.shape[0])
img_width = int(img_test.shape[1])
# Show the results on the image; putText positions are (x, y) in pixels
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img_test, "LR: " + y_predict_LR, (int(img_width/10), int(img_height/10)), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
cv2.putText(img_test, "LSVC: " + y_predict_LSVC, (int(img_width/10), int(img_height/10)*2), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
cv2.putText(img_test, "MLPC: " + y_predict_MLPC, (int(img_width/10), int(img_height/10)*3), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
cv2.putText(img_test, "SGDC: " + y_predict_SGDC, (int(img_width/10), int(img_height/10)*4), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
cv2.namedWindow("img", 2)
cv2.imshow("img", img_test)
cv2.waitKey(0)
cv2.destroyAllWindows()
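The four blocks above assume that smile_test1 exposes helpers (model_LR(), model_LSVC(), model_MLPC(), model_SGDC()) which return the StandardScaler fitted on the 40-D lip-feature training set, while the classifiers themselves are loaded from the saved .m files. Below is a minimal sketch of what such a training helper could look like for the LR case; the CSV path and its layout (40 feature columns followed by a 0/1 smile label) are assumptions, and the train/test split is omitted for brevity.
import joblib
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

def model_LR(path_csv="D:/myworkspace/JupyterNotebook/Smile/data/data_csvs/data.csv",
             path_models="D:/myworkspace/JupyterNotebook/Smile/data/data_models/"):
    # Assumed layout: 40 lip-feature columns + 1 label column (0 = no smile, 1 = smile)
    data = np.loadtxt(path_csv, delimiter=",")
    X, y = data[:, :-1], data[:, -1]
    ss = StandardScaler().fit(X)                      # the scaler reused at prediction time
    clf = LogisticRegression().fit(ss.transform(X), y)
    joblib.dump(clf, path_models + "model_LR.m")      # saved model loaded by the test script
    return ss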
Real-time detection from the camera
# use the saved model
import joblib
import smile_test1
import dlib         # Dlib for face detection and landmarks
import numpy as np  # NumPy for array handling
import cv2          # OpenCV for image capture and display
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('D:/shape_predictor_68_face_landmarks.dat')
# Open the camera with OpenCV
cap = cv2.VideoCapture(0)
# Set the video parameters (property 3 = CAP_PROP_FRAME_WIDTH)
cap.set(3, 480)
def get_features(img_rd):
    # Input : img_rd            -- a BGR frame from the camera
    # Output: positions_lip_arr -- landmarks 49 to 68 (the 20 lip points), 40 values in all
    # Convert to grayscale for detection
    img_gray = cv2.cvtColor(img_rd, cv2.COLOR_BGR2GRAY)
    # Compute the 68 landmark coordinates
    positions_68_arr = []
    faces = detector(img_gray, 0)
    landmarks = np.matrix([[p.x, p.y] for p in predictor(img_rd, faces[0]).parts()])
    for idx, point in enumerate(landmarks):
        # Coordinates of each of the 68 points
        pos = (point[0, 0], point[0, 1])
        positions_68_arr.append(pos)
    positions_lip_arr = []
    # Collect points 49-68, i.e. positions_68_arr[48] to positions_68_arr[67]
    for i in range(48, 68):
        positions_lip_arr.append(positions_68_arr[i][0])
        positions_lip_arr.append(positions_68_arr[i][1])
    return positions_lip_arr
# Load the models and scalers once, outside the capture loop
path_models = "D:/myworkspace/JupyterNotebook/Smile/data/data_models/"
LR = joblib.load(path_models + "model_LR.m")
ss_LR = smile_test1.model_LR()
LSVC = joblib.load(path_models + "model_LSVC.m")
ss_LSVC = smile_test1.model_LSVC()
MLPC = joblib.load(path_models + "model_MLPC.m")
ss_MLPC = smile_test1.model_MLPC()
SGDC = joblib.load(path_models + "model_SGDC.m")
ss_SGDC = smile_test1.model_SGDC()
while cap.isOpened():
    # Frames are 480 high x 640 wide by default
    flag, img_rd = cap.read()
    kk = cv2.waitKey(1)
    img_gray = cv2.cvtColor(img_rd, cv2.COLOR_BGR2GRAY)
    # Detect faces in the frame
    faces = detector(img_gray, 0)
    # At least one face detected
    if len(faces) != 0:
        # Extract the 40-D lip feature of the current frame
        positions_lip_test = get_features(img_rd)
        # ######### LR ###########
        X_test_LR = ss_LR.transform([positions_lip_test])
        y_predict_LR = str(LR.predict(X_test_LR)[0]).replace('0', "no smile").replace('1', "with smile")
        print("LR:", y_predict_LR)
        # ######### LSVC ###########
        X_test_LSVC = ss_LSVC.transform([positions_lip_test])
        y_predict_LSVC = str(LSVC.predict(X_test_LSVC)[0]).replace('0', "no smile").replace('1', "with smile")
        print("LSVC:", y_predict_LSVC)
        # ######### MLPC ###########
        X_test_MLPC = ss_MLPC.transform([positions_lip_test])
        y_predict_MLPC = str(MLPC.predict(X_test_MLPC)[0]).replace('0', "no smile").replace('1', "with smile")
        print("MLPC:", y_predict_MLPC)
        # ######### SGDC ###########
        X_test_SGDC = ss_SGDC.transform([positions_lip_test])
        y_predict_SGDC = str(SGDC.predict(X_test_SGDC)[0]).replace('0', "no smile").replace('1', "with smile")
        print("SGDC:", y_predict_SGDC)
        print('\n')
    # Press 'q' to quit
    if kk == ord('q'):
        break
    # Show the camera window
    # cv2.namedWindow("camera", 0)  # uncomment if a resizable window is needed
    cv2.imshow("camera", img_rd)
# Release the camera
cap.release()
# Destroy all windows
cv2.destroyAllWindows()
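The capture loop only prints the predictions to the console. If the labels should also be drawn on the frame, as in the single-image script, a few cv2.putText calls can be added just before cv2.imshow("camera", img_rd). A minimal sketch, assuming the y_predict_* strings from the detection branch are in scope:
# Sketch: overlay the four predictions on the frame before showing it
font = cv2.FONT_HERSHEY_SIMPLEX
for k, (name, label) in enumerate([("LR", y_predict_LR), ("LSVC", y_predict_LSVC),
                                   ("MLPC", y_predict_MLPC), ("SGDC", y_predict_SGDC)]):
    cv2.putText(img_rd, name + ": " + label, (20, 30 * (k + 1)),
                font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)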
Result: