图片质量估计-如何判定一个人脸是否为阴阳脸
前言:
接上次 Python 版本的判定,本文给出 C++ 版本的阴阳脸判定方法;同样地,如下代码依赖 OpenCV,人脸检测部分依赖 dlib;
// Decide whether a face is unevenly lit (a "yin-yang face") by comparing the
// brightness (HSV V channel) of the left and right halves of the face.
//
// Pipeline:
//   1. Run the dlib-based detect() to get 68 facial landmarks
//      (x,y interleaved in facebox.landmark — assumed layout; confirm in FaceBox).
//   2. Build a polygon mask for each half-face and extract the masked pixels.
//   3. Convert each half to HSV and sum the V channel; dividing by the polygon
//      area gives a per-half mean brightness that can be compared.
//
// Parameters:
//   imgpath - path to the image file to analyze.
//
// Side effects: prints debug values to stdout/stderr and shows intermediate
// images with cv::imshow (blocks on cv::waitKey). Returns early if the image
// cannot be read.
void checkyinyang(string imgpath) {
	FaceBox facebox = detect(imgpath);
	cv::Mat temp = cv::imread(imgpath);
	if (temp.empty()) {
		// imread returns an empty Mat on failure; bail out instead of crashing.
		std::cerr << "checkyinyang: failed to read image: " << imgpath << std::endl;
		return;
	}
	const int rows = temp.rows;
	const int cols = temp.cols;
	cv::Mat mask = cv::Mat::zeros(rows, cols, CV_8UC1);
	cv::Mat mask2 = cv::Mat::zeros(rows, cols, CV_8UC1);

	// --- Left half-face polygon (15 vertices) ---
	// Jaw line points 0..8, then up the nose bridge (28, 27) and back along
	// the left eyebrow (21 down to 18) to close the contour near point 0.
	cv::Point polypt[1][15];
	for (int i = 0; i < 9; i++) {
		polypt[0][i] = cv::Point(facebox.landmark[2 * i], facebox.landmark[2 * i + 1]);
	}
	polypt[0][9]  = cv::Point(facebox.landmark[2 * 28], facebox.landmark[2 * 28 + 1]);
	polypt[0][10] = cv::Point(facebox.landmark[2 * 27], facebox.landmark[2 * 27 + 1]);
	for (int i = 21; i > 17; i--) {
		polypt[0][11 + 21 - i] = cv::Point(facebox.landmark[2 * i], facebox.landmark[2 * i + 1]);
	}
	int npt[] = { 15 };
	int lineType = 8;
	const cv::Point* ppt[1] = { polypt[0] };
	cv::fillPoly(mask, ppt, npt, 1, cv::Scalar(255), lineType);

	// Copy only the left-half pixels of the original image.
	// NOTE: pass the mask directly — the original `mask = mask` was Python
	// keyword-argument syntax that compiled as a pointless self-assignment.
	cv::Mat leftmask = cv::Mat::zeros(rows, cols, CV_8UC3);
	cv::bitwise_and(temp, temp, leftmask, mask);

	// Polygon area normalizes the brightness sum (larger region -> larger sum).
	std::vector<cv::Point> leftPoints(polypt[0], polypt[0] + 15);
	double left_Area = cv::contourArea(cv::Mat(leftPoints));

	std::vector<cv::Mat> leftHsv_sum;
	cv::Mat leftHsvimage;
	cv::cvtColor(leftmask, leftHsvimage, cv::COLOR_BGR2HSV);
	cv::split(leftHsvimage, leftHsv_sum);  // leftHsv_sum[2] is the V channel

	// --- Right half-face polygon (14 vertices) ---
	// Jaw line points 8..15, right eyebrow samples (26, 24, 22), then down
	// the nose bridge (27..29) to close the contour near point 8.
	cv::Point polyptRight[1][14];
	for (int i = 8; i < 16; i++) {
		polyptRight[0][i - 8] = cv::Point(facebox.landmark[2 * i], facebox.landmark[2 * i + 1]);
	}
	polyptRight[0][8]  = cv::Point(facebox.landmark[2 * 26], facebox.landmark[2 * 26 + 1]);
	polyptRight[0][9]  = cv::Point(facebox.landmark[2 * 24], facebox.landmark[2 * 24 + 1]);
	polyptRight[0][10] = cv::Point(facebox.landmark[2 * 22], facebox.landmark[2 * 22 + 1]);
	for (int i = 27; i < 30; i++) {
		polyptRight[0][11 + i - 27] = cv::Point(facebox.landmark[2 * i], facebox.landmark[2 * i + 1]);
	}
	const cv::Point* ppt_right[1] = { polyptRight[0] };
	int npt2[] = { 14 };
	cv::fillPoly(mask2, ppt_right, npt2, 1, cv::Scalar(255), lineType);

	cv::Mat rightmask = cv::Mat::zeros(rows, cols, CV_8UC3);
	cv::bitwise_and(temp, temp, rightmask, mask2);

	std::vector<cv::Point> rightPoints(polyptRight[0], polyptRight[0] + 14);
	double right_Area = cv::contourArea(cv::Mat(rightPoints));

	std::vector<cv::Mat> rightHsv_sum;
	cv::Mat rightHsvimage;
	cv::cvtColor(rightmask, rightHsvimage, cv::COLOR_BGR2HSV);
	cv::split(rightHsvimage, rightHsv_sum);

	// Debug output: print both halves symmetrically so the area-normalized
	// V sums can actually be compared (the original printed only the right).
	std::cout << "left_Area " << left_Area << std::endl;
	std::cout << " sum(leftHsv_sum[2])= " << cv::sum(leftHsv_sum[2]) << std::endl;
	std::cout << "right_Area " << right_Area << std::endl;
	std::cout << "rightmask " << cv::sum(mask2) << std::endl;
	std::cout << "right_mask " << mask.size() << std::endl;
	std::cout << " mean(rightHsv_sum[2]= " << cv::sum(rightHsv_sum[2]) << std::endl;

	cv::imshow("rightHsv_sum", rightHsv_sum[2]);
	cv::imshow("rightmask", rightmask);
	cv::imshow("leftHsvimage", leftHsv_sum[2]);
	cv::imshow("mask2", mask2);
	cv::imshow("Dlib特征点", temp);
	cv::waitKey(0);
}
- 结果和之前python版本类似:
- 原始图片:
- 左脸区域:
- 右脸区域:
后续:实际上这种判定阴阳脸的方式还是有缺陷的,有些人脸并没有那么容易区分。上次有朋友建议用灰度图取代原有的 V 通道进行判定,我实际测了一下并没有改进,所以最终还是决定用 V 通道进行判定。实际场景中,阴阳脸判定只是人脸质量检测的一部分,最终还是要为人脸识别任务服务的。单纯过滤左右脸亮度不均只解决了一部分问题,带有光线不均、光斑的情况也属于不正常光照,以后有好的方法再贴出来吧。