人脸检测的简单实验

OpenCV和DNN结合实现人脸检测

本人在工作之余,做了一个小功能,在动手之前阅读了不少文档,从而实现人脸检测功能,做这个目的有二,一方面是出于爱好,另一方面是提高自身编码能力。
1.下面是程序的流程图
在这里插入图片描述
实现步骤:
A) 首先需要先加载DNN模型文件,用深度学习DNN模型检测出人脸区域并进行裁剪,见下图,然后用opencv裁剪出人脸部分;
在这里插入图片描述
B) 然后把人脸图像转换为HSV颜色空间,通过HSV肤色模型计算出二值图像,见下图-二值图像;
在这里插入图片描述
C) 其中白色区域为皮肤区域,黑色部分为非皮肤区域,最后计算出皮肤区域面积占人脸图像面积的比值,如果计算的比值低于0.6,则认为是戴口罩,否则为不戴口罩。

实验结果图:
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
2程序代码
2.1. 新建基类BaseDetect,把公用的方法写在基类里,再新建图像检测派生类和视频检测派生类,避免重复封装函数,减少冗余代码。

// Shared base for image/video detectors: crops detected face rectangles
// out of a frame and classifies each as masked / unmasked via skin color.
class BaseDetect
{
public:
	BaseDetect() {}
	~BaseDetect() {}

	void ShowFaceRoi(Mat mat, vector<Rect> vector);// Crop every face rectangle and run the mask check on each
	bool ShowFaceRoi(Mat mat, Rect rect);// Crop one face rectangle; returns true if a mask is detected
	bool HasMask(Mat mat, char* title);// Decide mask presence from the skin-area / face-area ratio
};

2.2 通过opencv构造函数Mat(mat, rect)裁剪出人脸区域。

Void ShowFaceRoi(Mat mat, vector<Rect> vector)
{
	Mat matClone;
	for (int i = 0; i < vector.size(); i++)
	{
		matClone = mat.clone();
		Mat matRoi = Mat(matClone, vector.at(i));

		char title[16];
		sprintf_s(title, sizeof(title), "ROI_%d", i);
		//imshow(title, matRoi);
		HasMask(matRoi, title);
	}
}

// Crop a single face rectangle, display it, and run the mask check.
// Returns true when a mask is detected.
// NOTE: the static counter that numbers the windows is not thread-safe.
bool ShowFaceRoi(Mat mat, Rect rect)
{
	static int i = 0;

	// Clone only the ROI, not the whole frame; the copy keeps the shown
	// window independent of later drawing on `mat`.
	Mat matRoi = mat(rect).clone();

	char title[16];
	sprintf_s(title, sizeof(title), "ROI_%d", i++);
	imshow(title, matRoi);
	return HasMask(matRoi, title);
}

2.3 通过opencv先把彩色图像转化为HSV图像,结合HSV对面部皮肤敏感的特点,需要前期统计出人脸皮肤的h/s/v的敏感范围值,然后利用opencv库函数findContours计算出面部皮肤区域面积Amask,再计算Amask/Aroi的值,如果计算结果小于0.6,则判断是戴口罩。

// Decide whether the face wears a mask: threshold the HSV skin color,
// take the largest skin blob as the exposed-face area, and compare it to
// the whole ROI area. Ratio < 0.60 means most of the face is covered.
// mat:   BGR face ROI.
// title: window title of the ROI ("ROI_n"); reused as "HSV_n" for the mask.
// Returns true if a mask is detected.
bool HasMask(Mat mat, char * title)
{
	Mat hsv;
	cvtColor(mat, hsv, COLOR_BGR2HSV);

	// Skin tones fall into two hue bands (reddish wraps around 180).
	Mat mask1, mask2;
	inRange(hsv, Scalar(0, 30, 30), Scalar(35, 255, 255), mask1);
	inRange(hsv, Scalar(145, 30, 30), Scalar(180, 255, 255), mask2);
	Mat mask = mask1 + mask2;

	string strtitle = title;
	strtitle.replace(0, 3, "HSV");
	imshow(strtitle, mask);

	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(mask, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE, Point());

	// Track only the largest blob above the speck threshold (area > 3);
	// sorting the full list was unnecessary, and indexing [0] crashed when
	// every contour had been filtered out.
	double dFaceArea = 0.0;
	for (const auto &contour : contours)
	{
		double area = contourArea(contour);
		if (area > 3 && area > dFaceArea)
		{
			dFaceArea = area;
		}
	}

	// BUGFIX: the old `contours.size() > 1` guard both skipped valid
	// single-contour faces and allowed an out-of-bounds read on the
	// filtered area list; guard on the surviving area instead.
	bool bHasMask = false;
	if (dFaceArea > 0.0)
	{
		double dMaskRate = dFaceArea / (double)(mat.rows * mat.cols);
		if (dMaskRate < 0.60)
		{
			bHasMask = true;
		}
	}
	return bHasMask;
}

2.4. 图像检测代码,继承基类BaseDetect,初始化工作主要是获得图像大小和人脸检测模型文件,然后从指定目录加载图像,利用深度学习网络模型检测出人脸区域,并调用基类裁剪面部函数,获得面部图像以及是否戴口罩标志,如果戴口罩则用红色框显示面部区域,如果不戴口罩则用绿色框显示面部区域。

// Still-image face/mask detector built on BaseDetect.
// NOTE(review): inheritance is private (class default); the base helpers
// are only called internally, but confirm public inheritance wasn't intended.
class ImageDetect : BaseDetect
{
public:
	ImageDetect();
	~ImageDetect();

	// Network input size, pixel scale factor, and mean value for blobFromImage.
	void InitParams(const size_t nWidth, const size_t nHeight, const double dScaleFactor, const Scalar scaMeanVal);
	// Paths to the Caffe prototxt (config) and caffemodel (weights) files.
	void SetModelResources(const string modelConfig, const string modelBinary);
	int StartImageDetect(const string pathName);// Detect faces in an image file; 0 on success, -1 on failure
	
private:
	size_t m_nWidth;          // DNN input width
	size_t m_nHeight;         // DNN input height
	double m_dScaleFactor;    // pixel scale factor for blobFromImage
	Scalar m_scaMeanVal;      // mean value subtracted by blobFromImage

	string m_strModelconfig;  // prototxt path
	string m_strModelBinary;  // caffemodel path
};

int StartImageDetect(const string pathName)
{
	Mat matImage;
	matImage = imread(pathName);
	if (matImage.empty() || !matImage.data)
	{
		cerr<<"Load image error."<<endl;
		return -1;
	}
	Mat matTemp = matImage;
	float min_confidence = 0.5;

	dnn::Net net = readNetFromCaffe(m_strModelconfig, m_strModelBinary);

	if (net.empty())
	{
		cerr << "Can't load network by using the following files: " << endl;
		cerr << "prototxt:   " << m_strModelconfig << endl;
		cerr << "caffemodel: " << m_strModelBinary << endl;
		return -1;
	}

	Mat inputBlob = dnn::blobFromImage(matImage, m_dScaleFactor, Size(m_nWidth, m_nHeight), m_scaMeanVal, false, false);
	net.setInput(inputBlob, "data");    
	Mat detection = net.forward("detection_out");    
	vector<double> layersTimings;
	double freq = getTickFrequency() / 1000;
	double time = net.getPerfProfile(layersTimings) / freq;

	Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
	ostringstream ss;
	ss << "FPS: " << 1000 / time << " ; time: " << time << "ms" << endl;

	vector<Rect> vecRect;
	float confidenceThreshold = min_confidence;
	for (int i = 0; i < detectionMat.rows; ++i)
	{
		float confidence = detectionMat.at<float>(i, 2);

		if (confidence > confidenceThreshold)
		{
			int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * matImage.cols);
			int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * matImage.rows);
			int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * matImage.cols);
			int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * matImage.rows);
			Rect faceRect((int)xLeftBottom, (int)yLeftBottom, (int)(xRightTop - xLeftBottom), (int)(yRightTop - yLeftBottom));
			
			ss.str("");
			ss << confidence;
			String conf(ss.str());
			
			if (ShowFaceRoi(matTemp, faceRect))
			{
				rectangle(matImage, faceRect, Scalar(0, 0, 255), 2);
			}
			else
			{
				rectangle(matImage, faceRect, Scalar(0, 255, 0), 2);
			}
		}
	}

	ShowFaceRoi(matTemp, vecRect);
	namedWindow("Face_Detection", WINDOW_NORMAL);
	imshow("Face_Detection", matImage);
	waitKey(0);
	return 0;
}

2.5. 视频检测代码,继承基类BaseDetect,初始化工作主要是获得图像大小和人脸检测模型文件,然后从指定目录加载mp4视频并通过循环获取每一帧图像,每帧图像利用深度学习网络模型检测出人脸区域,并调用基类裁剪面部函数,获得面部图像以及是否戴口罩标志,如果戴口罩则用红色框显示面部区域,如果不戴口罩则用绿色框显示面部区域。

int StartVideoDetect(const string pathName)
{
	cout << "start video detection." << endl;
	float min_confidence = 0.5;
	String modelConfiguration = m_strModelconfig;
	String modelBinary = m_strModelBinary;
	  
	dnn::Net net = readNetFromCaffe(modelConfiguration, modelBinary);
	 
	if (net.empty())
	{
		cerr << "Can't load network by using the following files: " << endl;
		cerr << "prototxt:   " << modelConfiguration << endl;
		cerr << "caffemodel: " << modelBinary << endl;
		return -1;
	}

	VideoCapture capture(pathName);
	if (!capture.isOpened())
	{
		cout << "Couldn't open video : " << pathName << endl;
		return -1;
	}
	while(1)
	{
		Mat frame;
		capture >> frame;   
		Mat matTemp = frame;
		if (frame.empty())
		{
			waitKey();
			break;
		}

		if (frame.channels() == 4)
			cvtColor(frame, frame, COLOR_BGRA2BGR);
		Mat inputBlob = blobFromImage(frame, m_dScaleFactor,
			Size(m_nWidth, m_nHeight), m_scaMeanVal, false, false);  
  
		net.setInput(inputBlob, "data");  								   
		Mat detection = net.forward("detection_out");   
		vector<double> layersTimings;
		double freq = getTickFrequency() / 1000;
		double time = net.getPerfProfile(layersTimings) / freq;

		Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

		ostringstream ss;
		ss << "FPS: " << 1000 / time << " ; time: " << time << " ms";
		putText(frame, ss.str(), Point(20, 20), 0, 0.5, Scalar(0, 0, 255));

		float confidenceThreshold = min_confidence;
		for (int i = 0; i < detectionMat.rows; i++)
		{
			float confidence = detectionMat.at<float>(i, 2);

			if (confidence > confidenceThreshold)
			{
				int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
				int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
				int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
				int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);

				Rect faceRect((int)xLeftBottom, (int)yLeftBottom,
					(int)(xRightTop - xLeftBottom),
					(int)(yRightTop - yLeftBottom));

				rectangle(frame, faceRect, Scalar(0, 255, 0));

				ss.str("");
				ss << confidence;
				String conf(ss.str());
				String label = "";

				if (ShowFaceRoi(matTemp, faceRect))
				{
					rectangle(frame, faceRect, Scalar(0, 0, 255), 2);
				}
				else
				{
					rectangle(frame, faceRect, Scalar(0, 255, 0), 2);
				}
				
			}
		}
		cv::imshow("detections", frame);
		if (waitKey(1) >= 0) break;
	}
	
	return 0;
}

main.cpp

// FaceDetection.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//

#include <iostream>
#include <limits>
#include <string>

#include <opencv2/highgui/highgui_c.h>

#include "ImageDetect.h"
#include "VideoDetect.h"


using namespace cv;
using namespace cv::dnn;
using namespace std;
const size_t inWidth = 300;
const size_t inHeight = 300;
const double inScaleFactor = 1.0;
const Scalar meanVal(104.0, 177.0, 123.0);
const String modelConfig = "face_detector/deploy.prototxt";//
const String modelBinary = "face_detector/res10_300x300_ssd_iter_140000.caffemodel";//

void videoDetection(string pathname);
void imageDetection(string pathname);
void training();

int main(int argc, char** argv)
{
	int code = -1;
	while (code)
	{
		cout << "Please input detection type..." << endl;
		cout << "<---image detection: 1--->" << endl;
		cout << "<---video detection: 2--->" << endl;
		cout << "<---exit: 0--->" << endl;
		int nDetectType = 0;
		cin >> nDetectType;
		code = nDetectType;

		string strPathname;
		switch (nDetectType)
		{
		case 0:
			cerr << "exit." << endl;
			break;
		case 1:
			cout << "Please input image pathname..." << endl;
			cin >> strPathname;
			imageDetection(strPathname);
			break;
		case 2:
			cout << "Please input video pathname..." << endl;
			cin >> strPathname;
			videoDetection(strPathname);
			break;
		default:
			
			cerr << "input parameters invalid." << endl;
			break;
		}
		cout << endl;
	}


	return 0;
}

// Configure a VideoDetect instance and run mask detection on a video file.
// pathname: path to the video (e.g. an .mp4 file).
void videoDetection(string pathname)
{
	// Stack allocation (RAII) instead of new/delete: exception-safe and
	// removes the manual delete/nullptr bookkeeping.
	VideoDetect vd;
	vd.InitParams(inWidth, inHeight, inScaleFactor, meanVal);
	vd.SetModelResources(modelConfig, modelBinary);
	cout << "Init video resources success." << endl;
	if (0 == vd.StartVideoDetect(pathname))
	{
		cout << "Detect video success." << endl;
	}
	else
	{
		cerr << "Detect video error." << endl;
	}
}

// Configure an ImageDetect instance and run mask detection on an image file.
// pathname: path to the image.
void imageDetection(string pathname)
{
	// Stack allocation (RAII) instead of new/delete: exception-safe and
	// removes the manual delete/nullptr bookkeeping.
	ImageDetect detector;
	detector.InitParams(inWidth, inHeight, inScaleFactor, meanVal);
	detector.SetModelResources(modelConfig, modelBinary);
	cout << "Init image resources success." << endl;
	if (0 == detector.StartImageDetect(pathname))
	{
		cout << "Detect image success." << endl;
	}
	else
	{
		cerr << "Detect image error." << endl;
	}
}

// Load positive/negative sample images for SVM training.
// Scaffolding only: the trainSVM call at the end is still commented out.
void training()
{
	/*********************************************** Training ***********************************************/
	// Positive / negative sample directories.
	string positive_path = "D:\\opencv_c++\\opencv_tutorial\\data\\test\\positive\\";
	string negative_path = "D:\\opencv_c++\\opencv_tutorial\\data\\test\\negative\\";
	// glob() collects every file path under each directory as a string.
	vector<string> positive_images_str, negative_images_str;
	glob(positive_path, positive_images_str);
	glob(negative_path, negative_images_str);
	// Decode each path into a Mat. Skip files that fail to decode: an
	// empty Mat in the training set would poison the SVM input.
	vector<Mat> positive_images, negative_images;
	for (size_t i = 0; i < positive_images_str.size(); i++)
	{
		Mat positive_image = imread(positive_images_str[i]);
		if (positive_image.empty())
			continue;
		//resize(positive_image, positive_image, Size(64, 128));
		positive_images.push_back(positive_image);
	}
	for (size_t j = 0; j < negative_images_str.size(); j++)
	{
		Mat negative_image = imread(negative_images_str[j]);
		if (negative_image.empty())
			continue;
		//resize(negative_image, negative_image, Size(64, 128));
		negative_images.push_back(negative_image);
	}
	string savePath = "face_mask_detection.xml";
	//trainSVM(positive_images, negative_images, savePath);
}

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值