第一步:通过opencv读取摄像头,或本地存储的视频,获得人脸灰度图像,作为训练集,验证集,和测试集。
保存格式如下,例如训练集train_images 中有子目录: person0, person1, person2,…,每个子目录中放置训练用到的person名字的所有图片,格式如下:
实现代码:
import cv2
import os
import numpy as np
def create_dir(*args):
    """Create every directory path given, skipping any that already exist."""
    missing = (path for path in args if not os.path.exists(path))
    for path in missing:
        os.makedirs(path)
def get_padding_size(shape):
    """Return [top, bottom, left, right] border widths that pad a rect to a square.

    shape -- (height, width) tuple, e.g. img.shape[:2]

    The short side is padded up to the long side. When the difference is
    odd, the extra pixel goes to bottom/right; the original halved both
    sides with // 2, which produced a (longest-1)-sized dimension.
    """
    h, w = shape
    longest = max(h, w)
    top = (longest - h) // 2
    bottom = longest - h - top    # absorbs the odd pixel
    left = (longest - w) // 2
    right = longest - w - left    # absorbs the odd pixel
    return [top, bottom, left, right]
def resize_image(img, h=64, w=64):
    """Pad *img* to a square with black borders, then resize to h x w.

    img -- grayscale or BGR image (ndarray)
    h   -- output height in pixels
    w   -- output width in pixels

    Padding the short side first keeps the face's aspect ratio instead of
    stretching it during the resize.
    """
    # Border widths needed to make the image square.
    top, bottom, left, right = get_padding_size(img.shape[0:2])
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=(0, 0, 0))
    # cv2.resize takes dsize as (width, height); the original passed
    # (h, w), which only worked because the defaults are square.
    img = cv2.resize(img, (w, h))
    return img
def relight(imgsrc, alpha=1, bias=0):
    """Adjust image brightness/contrast for data augmentation.

    Computes out = imgsrc * alpha + bias in float, clips to [0, 255],
    and returns a uint8 image of the same shape. The input array is not
    modified.

    Bug fixed: the original called imgsrc.astype(...) twice and discarded
    both results (astype returns a new array), so it returned float64
    data instead of uint8.
    """
    out = imgsrc.astype("float") * alpha + bias
    out = np.clip(out, 0, 255)
    return out.astype(np.uint8)
def detect_face(n, frame, haar, outdir):
    """Detect faces in *frame*, save an augmented crop of each, annotate the frame.

    n      -- index used as the output file name (<n>.jpg)
    frame  -- BGR image from the camera/video stream
    haar   -- cv2.CascadeClassifier for frontal faces
    outdir -- directory the grayscale face crops are written into

    Returns the frame with a label and rectangle drawn on each face.
    """
    # Grayscale speeds up Haar detection and is what the model trains on.
    img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = haar.detectMultiScale(img_gray, 1.3, 5)
    for face_x, face_y, face_w, face_h in faces:
        # NumPy indexes [row, col] == [y, x]. The original sliced
        # [x:x+w, y:y+h], which crops the wrong region whenever the
        # face is not on the image diagonal.
        face = img_gray[face_y:face_y + face_h, face_x:face_x + face_w]
        face = resize_image(face)
        # Random brightness/contrast jitter improves generalisation.
        light_face = relight(face, np.random.uniform(0.5, 1.5),
                             np.random.randint(-50, 50))
        cv2.imwrite(os.path.join(outdir, '{}.jpg'.format(n)), light_face)
        # Mark the face on the preview frame.
        cv2.putText(frame, "name", (face_x, face_y - 20),
                    cv2.FONT_HERSHEY_COMPLEX, 1, 255, 2)
        frame = cv2.rectangle(frame, (face_x, face_y),
                              (face_x + face_w, face_y + face_h),
                              (255, 0, 0), 2)
    print("frame", frame.shape)
    return frame
def get_face_from_camera(outdir):
    """Open the default camera and save detected face crops into *outdir*.

    Processes up to 140 frames, showing a live preview; press 'q' to
    stop early. Stops cleanly if the camera fails to deliver a frame.
    """
    create_dir(outdir)
    # CAP_DSHOW avoids the slow camera start-up on Windows.
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    print(size)
    # Haar cascade for frontal-face detection.
    haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    for i in range(140):
        print("It`s processing {} image".format(i))
        ret, frame = cap.read()
        if not ret:
            # Grab failed (camera unplugged / busy): stop instead of
            # passing frame=None into detect_face and crashing.
            break
        frame1 = detect_face(i, frame, haar, outdir)
        cv2.imshow("frame", frame1)
        k = cv2.waitKey(20)
        if k & 0xff == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def get_face_from_local(local_path, outdir):
""" 读取本地视频,调用detect_face函数, 获取人脸图像,并保存到outdir文件夹"""
cap = cv2.VideoCapture(local_path)
size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
create_dir(outdir)
haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
i = 0
while</