# Path to the pre-trained Torch face-embedding model (OpenFace nn4.small2.v1)
# used below by cv2.dnn.readNetFromTorch to compute face embeddings.
embedding_model = 'nn4.small2.v1.t7'
# Minimum detection confidence: detections scoring at or below this threshold
# are treated as weak and filtered out.
confidence_low = 0.5
dataset_path:数据集路径
embeddings_path:输出编码文件的路径
detector_path:人脸检测模型的路径
embedding_model:编码模型
confidence_low:最低的置信度。
接下来就是代码的最重要的部分:
# Load the serialized face detector (Caffe SSD) from disk via OpenCV's DNN module.
print("loading face detector…")
protoPath = os.path.sep.join([detector_path, "deploy.proto.txt"])
modelPath = os.path.sep.join(
    [detector_path, "res10_300x300_ssd_iter_140000_fp16.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

# Load the serialized face-embedding model (Torch format).
print("loading face recognizer…")
embedder = cv2.dnn.readNetFromTorch(embedding_model)

# Gather the paths of all input images in the dataset.
print("quantifying faces…")
imagePaths = list(list_images(dataset_path))

# Lists of extracted facial embeddings and the corresponding person names,
# filled in by the processing loop that follows.
knownEmbeddings = []
knownNames = []

# Running count of faces processed so far.
total = 0
loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
extract the person name from the image path
print("processing image {}/{}".format(i + 1, len(imagePaths)))
name = imagePath.split(os.path.sep)[-2]
加载图像,将其调整为宽度为 600 像素(同时保持纵横比),然后抓取图像尺寸
image = cv2.imread(imagePath)
image = resize(image, width=600)
(h, w) = image.shape[:2]
从图像构建一个 blob
imageBlob = cv2.dnn.blobFromImage(
cv2.resize(image, (300, 300)), 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=False, crop=False)
使用 OpenCV 的基于深度学习的人脸检测器来定位输入图像中的人脸
detector.setInput(imageBlob)
detections = detector.forward()
ensure at least one face was found
if len(detections) > 0:
假设每个图像只有一张脸,所以找到概率最大的边界框
i = np.argmax(detections[0, 0, :, 2])
confidence = detections[0, 0, i, 2]
确保最大概率的检测也意味着我们的最小概率测试(从而帮助过滤掉弱检测)
if confidence > confidence_low:
计算人脸边界框的 (x, y) 坐标
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
提取人脸ROI并抓取ROI维度
face = image[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
确保人脸宽度和高度足够大
if fW < 20 or fH < 20:
continue
为人脸 ROI 构造一个 blob,然后将 blob 通过我们的人脸嵌入模型来获得人脸的 128-d 量化
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
(96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()