Face Alignment via Affine Transformation Using the Five Facial Landmarks Detected by MTCNN, with OpenCV - C++ Version

This post presents C++ code for MTCNN-based face detection and face alignment via affine transformation. The code first detects the five facial landmarks with MTCNN, then expands the detected face region so that no black borders appear after alignment, and finally applies an affine transformation onto a set of fixed target coordinates, producing aligned face crops of size 112x96 and 112x112. The whole pipeline depends only on OpenCV, and a download link for the complete project is provided.

This post is the C++ counterpart of the earlier Python version of face alignment (the Python version is here). In plain terms, face alignment corrects faces whose pose in the image is not upright, so I wrote a C++ version of the alignment algorithm. The basic idea is to first detect the five facial landmarks with MTCNN, then expand the face region in the original image by 100% (this keeps the aligned image free of black areas; the expansion ratio can be tuned based on the alignment result, and I use 100% here). The aligned output comes in two sizes, 112x96 and 112x112. For each size, the landmark coordinates in the target image are fixed in advance, and the affine transformation maps the detected landmarks directly onto them. Without further ado, here is the code.
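One piece the listing below relies on but does not show is getAffineMatrix from align/FaceAlign.h, which solves for the 2x3 transform mapping the five detected landmarks onto the fixed target points. As a rough, hypothetical sketch (not the project's actual implementation), an equivalent similarity transform can be estimated with OpenCV's cv::estimateAffinePartial2D, assuming the same landmark layout used in the listing, i.e. all five x-coordinates first, then all five y-coordinates:

#include <opencv2/opencv.hpp>
#include <vector>

// Hypothetical stand-in for getAffineMatrix(): estimate a 2x3 similarity
// transform (rotation + uniform scale + translation) that maps five detected
// landmarks onto the fixed target landmarks. Both arrays are assumed to be
// laid out as {x0..x4, y0..y4}.
static cv::Mat estimateAlignMatrix(const double src[10], const double dst[10])
{
	std::vector<cv::Point2f> srcPts, dstPts;
	for (int i = 0; i < 5; ++i) {
		srcPts.emplace_back((float)src[i], (float)src[i + 5]);
		dstPts.emplace_back((float)dst[i], (float)dst[i + 5]);
	}
	// Fits a similarity transform to the five point pairs; the returned 2x3
	// CV_64F matrix can be passed directly to cv::warpAffine (empty on failure).
	return cv::estimateAffinePartial2D(srcPts, dstPts);
}

The matrix returned this way plays the same role as the M[6] array filled by getAffineMatrix in the code below.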

#include<iostream>
#include<fstream>
#include<stdio.h>
#include<math.h>
#include<opencv2/opencv.hpp>
#include<opencv2/dnn.hpp>
#include<opencv2/highgui.hpp>
#include<opencv2/imgproc.hpp>

#include "detect/mtcnn/detector.h"
#include "detect/draw.hpp"
#include "align/FaceProprocess.h"

#include "align/FaceAlign.h"

int main()
{
	/* Detection */
	std::string modelPath = "detect/models/";
	std::string picPath;

	ProposalNetwork::Config pConfig;
	pConfig.caffeModel = modelPath + "det1.caffemodel";
	pConfig.protoText = modelPath + "det1.prototxt";
	pConfig.threshold = 0.6f;

	RefineNetwork::Config rConfig;
	rConfig.caffeModel = modelPath + "/det2.caffemodel";
	rConfig.protoText = modelPath + "/det2.prototxt";
	rConfig.threshold = 0.7f;

	OutputNetwork::Config oConfig;
	oConfig.caffeModel = modelPath + "/det3.caffemodel";
	oConfig.protoText = modelPath + "/det3.prototxt";
	oConfig.threshold = 0.7f;

	MTCNNDetector detector(pConfig, rConfig, oConfig);
	std::ifstream infile("data.txt");
	if (!infile) { // failed to open the image list
		std::cout << "data.txt does not exist!" << std::endl;
		return -1;
	}
	while (infile >> picPath)
	{
		cv::Mat img = cv::imread(picPath);

		std::vector<Face> faces;

		{
			faces = detector.detect(img, 20.f, 0.709f);
		}

		std::cout << "Number of faces found in the supplied image - " << faces.size()
			<< std::endl;

		std::vector<rectPoints> data;
		/* End of detection */

		/* Alignment */

		double coord5point1[10] = { 30.2946, 65.5318, 48.0252, 33.5493, 62.7299, 51.6963, 51.5014, 71.7366, 92.3655, 92.2041 }; // target landmarks for 112x96
		double coord5point2[10] = { 30.2946 + 8.0000, 65.5318 + 8.0000, 48.0252 + 8.0000, 33.5493 + 8.0000, 62.7299 + 8.0000, 51.6963, 51.5014, 71.7366, 92.3655, 92.2041 }; // target landmarks for 112x112 (x shifted by 8)
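		// Note: like enlargeFace_landmark further below, both target arrays store
		// the five points as all x-coordinates first and then all y-coordinates
		// (left eye, right eye, nose, left mouth corner, right mouth corner).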

		for (size_t i = 0; i < faces.size(); i++)
		{
			int x1 = faces[i].bbox.x1;
			int y1 = faces[i].bbox.y1;
			int x2 = faces[i].bbox.x2;
			int y2 = faces[i].bbox.y2;

			// expand the box by 100% so the aligned face has no black borders
			int new_x1 = std::max(int(1.50 * x1 - 0.50 * x2), 0);
			int new_x2 = std::min(int(1.50 * x2 - 0.50 * x1), img.cols - 1);
			int new_y1 = std::max(int(1.50 * y1 - 0.50 * y2), 0);
			int new_y2 = std::min(int(1.50 * y2 - 0.50 * y1), img.rows - 1);
			Mat enlargedFace = img(Rect(new_x1, new_y1, new_x2 - new_x1 + 1, new_y2 - new_y1 + 1));
			//imwrite("enlargedFace.jpg", enlargedFace);

			// landmark coordinates in the original image
			int left_eye_x = faces[i].ptsCoords[0];
			int left_eye_y = faces[i].ptsCoords[1];
			int right_eye_x = faces[i].ptsCoords[2];
			int right_eye_y = faces[i].ptsCoords[3];
			int nose_x = faces[i].ptsCoords[4];
			int nose_y = faces[i].ptsCoords[5];
			int left_mouth_x = faces[i].ptsCoords[6];
			int left_mouth_y = faces[i].ptsCoords[7];
			int right_mouth_x = faces[i].ptsCoords[8];
			int right_mouth_y = faces[i].ptsCoords[9];

			// landmark coordinates in the expanded crop (the expansion prevents black borders after alignment)
			int new_left_eye_x = left_eye_x - new_x1;
			int new_right_eye_x = right_eye_x - new_x1;
			int new_nose_x = nose_x - new_x1;
			int new_left_mouth_x = left_mouth_x - new_x1;
			int new_right_mouth_x = right_mouth_x - new_x1;
			int new_left_eye_y = left_eye_y - new_y1;
			int new_right_eye_y = right_eye_y - new_y1;
			int new_nose_y = nose_y - new_y1;
			int new_left_mouth_y = left_mouth_y - new_y1;
			int new_right_mouth_y = right_mouth_y - new_y1;

			double M[6];
			double enlargeFace_landmark[10] = { new_left_eye_x, new_right_eye_x, new_nose_x, new_left_mouth_x, new_right_mouth_x, new_left_eye_y, new_right_eye_y, new_nose_y, new_left_mouth_y, new_right_mouth_y };
			//112x112
			getAffineMatrix(enlargeFace_landmark, coord5point2, M);
			Mat warp_mat_112x112 = (Mat_<float>(2, 3) << M[0], M[1], M[2], M[3], M[4], M[5]);
			Mat alignFace_112x112 = Mat::zeros(112, 112, img.type());
			warpAffine(enlargedFace, alignFace_112x112, warp_mat_112x112, alignFace_112x112.size()); // warp the expanded crop into the 112x112 aligned face
			int position = picPath.find("/", 0);
			imwrite("result/" + picPath.substr(position + 1, 4) + "_align_112x112.jpg", alignFace_112x112);

			//112x96
			getAffineMatrix(enlargeFace_landmark, coord5point1, M);
			Mat warp_mat_112x96 = (Mat_<float>(2, 3) << M[0], M[1], M[2], M[3], M[4], M[5]);
			Mat alignFace_112x96 = Mat::zeros(112, 96, img.type());
			warpAffine(enlargedFace, alignFace_112x96, warp_mat_112x96, alignFace_112x96.size()); // warp the expanded crop into the 112x96 aligned face
			imwrite("result/" + picPath.substr(position + 1, 4) + "_align_112x96.jpg", alignFace_112x96);
		}
	}

	/* End of alignment */
	
	
	return 0;
}
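As a quick sanity check of the 100% expansion used above: for a detected box with x1 = 100 and x2 = 200 (width 100), new_x1 = 1.5*100 - 0.5*200 = 50 and new_x2 = 1.5*200 - 0.5*100 = 250, so the crop becomes 200 pixels wide, i.e. doubled and still centered on the original box (before clamping to the image borders); the y-coordinates behave the same way.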

Running the code above produces the results shown below:

Note: this project was built with Visual Studio 2019 and its only dependency is OpenCV. After downloading the project, set OpenCV's include and library directories and the additional dependencies (linker input), then open the project and run it to reproduce the results above.
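The program reads its input from data.txt, one image path per line. The exact contents depend on your data; a hypothetical list that matches the path handling in the code (a subdirectory prefix plus a four-character file-name stem used to build the output name) could look like:

images/0001.jpg
images/0002.jpg
images/0003.jpg

The aligned crops are then written into a result/ folder, e.g. result/0001_align_112x112.jpg and result/0001_align_112x96.jpg; create that folder beforehand, since cv::imwrite does not create missing directories.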

The complete project can be downloaded here (code: face); feel free to download and use it.
 
