基于opencv检测人脸并实现跟踪
抽取图像Haar特征描述,然后使用级联分类器检测人脸位置;基于Kalman滤波方法,每次迭代通过协方差计算出的卡尔曼增益,自动确定上一时刻人脸位置与当前时刻观测到的人脸位置的权重,最终得到最合适的人脸位置。卡尔曼滤波是一个信息融合的过程,融合的是观测和预测:如果观测更可靠,就给观测更高的权重;反之则给预测更高的权重。
// Kalman_face.cpp : 定义控制台应用程序的入口点。
#include "stdafx.h"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/tracking.hpp"
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <deque>
using namespace std;
using namespace cv;
/*自带分类器*/
string cascade_name = "haarcascade_frontalface_default.xml";
//string cascade_name = "D:\\opencv-3.4.6\\data\\haarcascades\\haarcascade_fullbody.xml"; //全身检测
CascadeClassifier face_cascade;
struct face {
Point leftTop = 0;
int width = 0;
int height = 0;
};
void detect_display(Mat frame, vector<Rect> &faces){
Mat frame_gray;
cvtColor(frame, frame_gray, CV_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
//多尺度检测
bool is_load_file = 0;
is_load_file = face_cascade.load(cascade_name);
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 3, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
}
int main(){
face preFace;
vector<Rect> faces;
Mat frame;
deque<Point2i> tracks;
//kalman参数设置
int stateNum = 4; //状态值4×1向量(x,y,△x,△y)
int measureNum = 2; //测量值2×1向量(x,y)
KalmanFilter KF(stateNum, measureNum, 0);
//Mat processNoise(stateNum, 1, CV_32F);
Mat measurement = Mat::zeros(measureNum, 1, CV_32F);
KF.transitionMatrix = (Mat_<float>(stateNum, stateNum) //状态转移矩阵A ,控制矩阵B默认为零
<< 1, 0, 1, 0,
0, 1, 0, 1,
0, 0, 1, 0,
0, 0, 0, 1);
setIdentity(KF.measurementMatrix); //测量矩阵 H=[1,0,0,0;0,1,0,0]
setIdentity(KF.processNoiseCov, Scalar::all(1e-5)); //系统噪声方差矩阵Q,高斯白噪声,单位阵
setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1)); //测量噪声方差矩阵R,高斯白噪声,单位阵
setIdentity(KF.errorCovPost, Scalar::all(1)); //后验错误估计协方差矩阵P,初始化为单位阵
randn(KF.statePost, Scalar::all(0), Scalar::all(0.1)); //初始化状态为随机值
/*打开视频*/
VideoCapture cap(0);
/*保存视频信息*/
string outputVideoPath = "humman_detect.avi";
Size sWH = Size(640, 480);
VideoWriter outputVideo;
outputVideo.open(outputVideoPath, CV_FOURCC('M', 'P', '4', '2'), 20.0, sWH);
while (1) {
cap >> frame;
resize(frame, frame, Size(), 0.5, 0.5, INTER_LINEAR); //减少计算量
//rotate(frame, frame, ROTATE_90_COUNTERCLOCKWISE);
Mat prediction = KF.predict();
measurement.at<float>(0) = (float)preFace.leftTop.x;
measurement.at<float>(1) = (float)preFace.leftTop.y;
KF.correct(measurement); //更新测量值:输入形参:实际输出z(k),求出了卡尔曼增益Kk、后验更新状态向量x(k) 和 后验更新协方差矩阵p(k)
Point predict_pt = Point((int)prediction.at<float>(0), (int)prediction.at<float>(1)); //预测值(x',y')
detect_display(frame,faces);
/*找图像中的脸*/
int Max_area = 0;
int faceID = 0;
//找出最大的
for (int i = 0; i < faces.size(); i++) {
if ((int)(faces[i].width*faces[i].height) > Max_area) {
Max_area = (int)faces[i].width*faces[i].height;
faceID = i;
}
}
/*检测到脸,绘制当前最大的脸*/
if (faces.size() > 0) {
preFace.leftTop.x = faces[faceID].x;
preFace.leftTop.y = faces[faceID].y;
preFace.height = faces[faceID].height;
preFace.width = faces[faceID].width;
Point center(faces[faceID].x + faces[faceID].width*0.5, faces[faceID].y + faces[faceID].height*0.5);
ellipse(frame, center, Size(faces[faceID].width*0.5, faces[faceID].height*0.5), 0, 0, 360, Scalar(0, 255, 0), 1, 8, 0);
circle(frame, center, 3, Scalar(0, 255, 0), -1);
}
else {
Point center(preFace.leftTop.x + preFace.width*0.5, preFace.leftTop.y + preFace.height*0.5);
ellipse(frame, center, Size(preFace.width*0.5, preFace.height*0.5), 0, 0, 360, Scalar(0, 255, 0), 1, 8, 0);
circle(frame, center, 3, Scalar(0, 255, 0), -1);
}
/*显示效果*/
Point center(predict_pt.x + preFace.width*0.5, predict_pt.y + preFace.height*0.5);
ellipse(frame, center, Size(preFace.width*0.3, preFace.height*0.3), 0, 0, 360, Scalar(0, 0, 255), 4, 8, 0);
circle(frame, center, 3, Scalar(0, 0, 255), -1);
/*通过显示连续中心点画轨迹*/
if (tracks.size() != 30) { //插入30个点
tracks.push_back(center);
}
else {
tracks.pop_front();
tracks.push_back(center);
}
//遍历30个点
for (int i = 0; i<tracks.size(); i++) {
circle(frame, tracks.at(i), 1, Scalar(255, 255, 255), -1);
}
cout << tracks.size()<<endl;
resize(frame, frame, Size(), 2, 2, INTER_LINEAR);
imshow("detect_and_track", frame);
outputVideo << frame;
waitKey(1);
}
return 0;
}