Using OpenCV 3.4 in Qt for Face Detection and Facial Landmark Extraction
This article shows how to implement face detection and facial landmark extraction on Windows with Qt 5.6.3, OpenCV 3.4 and opencv_contrib 3.4.

To call OpenCV from Qt you can either compile the library yourself or use a MinGW build someone else has made. The common approach is to compile with Visual Studio, but you can also compile with Qt's own MinGW toolchain. Either way you need to install CMake; during installation choose the second option so that CMake is added to the PATH automatically, otherwise you have to add it to the environment variables yourself.
1. Building with CMake
In the CMake GUI, choose the second option (specify the native compilers yourself), set the source directory (the sources folder of the OpenCV package) and a build directory you create yourself, then click Configure. The first time, you have to tell CMake which compilers to use: add the absolute paths of Qt's gcc.exe and g++.exe. On my machine they are in D:\Qt\Qt5.6.3\Tools\mingw492_32\bin; look under your own installation path.
Uncheck BUILD_opencv_python3, BUILD_opencv_python_bindings_generator, WITH_MATLAB, BUILD_opencv_hdf, ENABLE_PRECOMPILED_HEADERS and WITH_IPP; check ENABLE_CXX11, WITH_QT and WITH_OPENGL. To include the opencv_contrib 3.4 modules, type OPENCV_EXTRA_MODULES_PATH into the search box and set it to the contrib modules directory.
Click Configure again and then Generate. If errors appear at the start, it is probably because D:\Qt\Qt5.6.3\Tools\mingw492_32\bin\mingw32-make.exe has not been set; search for the corresponding entry and point it to your own mingw32-make.exe.
Finally, open a command prompt in the build directory (Shift + right-click, open command window here) and run mingw32-make. If that fails, Qt's tools directory is probably not on the PATH; copying mingw32-make.exe into the build folder and running .\mingw32-make works around it.
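For reference, the GUI steps above correspond roughly to the following command-line invocation. This is only a sketch: the OpenCV source, contrib and build directories (D:/opencv/sources, D:/opencv_contrib/modules, D:/Qt/release) are placeholders chosen to match the paths used later in this article, so replace them with your own.

cd /d D:\Qt\release
cmake -G "MinGW Makefiles" ^
  -D CMAKE_C_COMPILER=D:/Qt/Qt5.6.3/Tools/mingw492_32/bin/gcc.exe ^
  -D CMAKE_CXX_COMPILER=D:/Qt/Qt5.6.3/Tools/mingw492_32/bin/g++.exe ^
  -D CMAKE_MAKE_PROGRAM=D:/Qt/Qt5.6.3/Tools/mingw492_32/bin/mingw32-make.exe ^
  -D ENABLE_CXX11=ON -D WITH_QT=ON -D WITH_OPENGL=ON ^
  -D WITH_IPP=OFF -D WITH_MATLAB=OFF -D ENABLE_PRECOMPILED_HEADERS=OFF ^
  -D BUILD_opencv_python3=OFF -D BUILD_opencv_python_bindings_generator=OFF -D BUILD_opencv_hdf=OFF ^
  -D OPENCV_EXTRA_MODULES_PATH=D:/opencv_contrib/modules ^
  D:/opencv/sources
mingw32-make
mingw32-make install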
2. Building with Qt Creator and CMake
Install CMake and open Qt Creator; you should see the CMake installation path picked up there. For this approach see https://blog.csdn.net/scien2011/article/details/52830794, so I will not repeat it here.
3. Once the libraries are built, we can use OpenCV for face detection.
The full source is given below, starting with the project file.
.pro file
QT += core gui sql
greaterThan(QT_MAJOR_VERSION, 4): QT += widgets
TARGET = CameraManage
TEMPLATE = app
SOURCES += main.cpp\
cameramanage.cpp
HEADERS += cameramanage.h
FORMS += cameramanage.ui
INCLUDEPATH += D:/Qt/release/install/include \
D:/Qt/release/install/include/opencv \
D:/Qt/release/install/include/opencv2
# Link against the MinGW import libraries; adjust the path to your own OpenCV build.
# The matching DLLs (in D:/Qt/release/bin or install/x86/mingw/bin) must be on PATH at run time.
LIBS += $$files(D:/Qt/release/install/x86/mingw/lib/libopencv_*.a)
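Before building the full GUI, it helps to confirm that the include and library paths above actually work. The following standalone main.cpp is only a hypothetical sanity check (not part of the project): it prints the OpenCV version and grabs one frame from the default camera.

#include <iostream>
#include <opencv2/opencv.hpp>

int main()
{
    // If this compiles and links, INCLUDEPATH and LIBS are set up correctly.
    std::cout << "OpenCV version: " << CV_VERSION << std::endl;

    cv::VideoCapture cap(0);          // open the default camera
    if (!cap.isOpened()) {
        std::cerr << "Could not open camera 0" << std::endl;
        return 1;
    }

    cv::Mat frame;
    cap >> frame;                     // grab a single frame
    std::cout << "Captured a " << frame.cols << "x" << frame.rows << " frame" << std::endl;
    return 0;
}

If the program builds but fails to start because the OpenCV DLLs cannot be found, copy them next to the executable or add the build's bin directory to PATH.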
.h file (cameramanage.h)
#ifndef CAMERAMANAGE_H
#define CAMERAMANAGE_H
#include <QMainWindow>
#include <QWidget>
#include <QImage>
#include <QLabel>
#include <QTimer>     // timer that sets the frame-capture interval
#include <iostream>
#include <iterator>
#include <vector>
#include "opencv2/highgui/highgui.hpp" //包含opencv库头文件
#include "opencv/cv.h"
#include <opencv2/opencv.hpp>
#include <opencv2/face.hpp>
using namespace std;
using namespace cv;
using namespace cv::face;
#define COLOR Scalar(255,200,0)
namespace Ui {
class CameraManage;
}
class CameraManage : public QMainWindow
{
Q_OBJECT
public:
explicit CameraManage(QWidget *parent = 0);
~CameraManage();
QImage Mat2QImage(Mat& cvImg);
QImage ScaleImage2Label(QImage qImage, QLabel* qLabel);
private slots:
void on_open_clicked();      // open the camera
void readFarme();            // read the current frame
void on_close_clicked();     // close the camera
void on_takePhono_clicked(); // take a photo
void on_faceDete_clicked();  // face detection on a saved photo
void on_videoDete_clicked(); // face detection on the live camera video
void drawPolyline
(
Mat &im,
const vector<Point2f> &landmarks,
const int start,
const int end,
bool isClosed = false
);
void drawLandmarks(Mat &im, vector<Point2f> &landmarks);
private:
// void detectAndDraw(Mat& img);
void detectAndDraw( Mat& img, CascadeClassifier& cascade,
CascadeClassifier& nestedCascade,
double scale, bool tryflip );
private:
Ui::CameraManage *ui;
QTimer *timer;
QTimer *timer_1;
QImage image;
CvCapture *cam;   // legacy C video-capture struct, used as a parameter to the C capture functions
Mat frame;
Mat fram_dete;
VideoCapture cap;
VideoCapture capDete;
CascadeClassifier face_cascade;
Ptr<face::Facemark> facemark;
};
#endif // CAMERAMANAGE_H
.cpp file (cameramanage.cpp)
#include "cameramanage.h"
#include "ui_cameramanage.h"
#include <QPixmap>
#include <QFileDialog>
#include <QPicture>
#include <opencv2/objdetect.hpp>
#include <iostream>
//static CvHaarClassifierCascade* cascade = 0;
//static CvMemStorage* storage = 0;
const char* cascade_name = "haarcascade_frontalface_alt.xml";            // classifier used for face detection
const char* cascade_name_class = "haarcascade_eye_tree_eyeglasses.xml";  // classifier used for eye detection
CameraManage::CameraManage(QWidget *parent) :
QMainWindow(parent),
ui(new Ui::CameraManage)
{
ui->setupUi(this);
// Create an instance of Facemark
facemark = face::FacemarkLBF::create();
// Load landmark detector
facemark->loadModel("E:\\QT_Work\\CameraManage1\\lbfmodel.yaml");
if(!face_cascade.load("E:\\QT_Work\\CameraManage1\\haarcascade_frontalface_alt.xml"))
cout << "--(!)Error loading face cascade\n";
cam = NULL;
timer = new QTimer(this);
timer_1 = new QTimer(this);
// image = new QImage(); // initialisation
/* signals and slots */
connect(timer, SIGNAL(timeout()), this, SLOT(readFarme())); // on each timeout, read the current camera frame
// connect(timer_1, SIGNAL(timeout()), this, SLOT(on_videoDete_clicked()));
// Note: setupUi() already auto-connects slots named on_<objectName>_<signal>(), so these
// explicit connect() calls are redundant and make each slot fire twice per click.
connect(ui->open, SIGNAL(clicked()), this, SLOT(on_open_clicked()));
connect(ui->close, SIGNAL(clicked()), this, SLOT(on_close_clicked()));
connect(ui->takePhono, SIGNAL(clicked()), this, SLOT(on_takePhono_clicked()));
connect(ui->videoDete, SIGNAL(clicked()), this, SLOT(on_videoDete_clicked()));
}
CameraManage::~CameraManage()
{
delete ui;
delete timer;
// delete image;
}
QImage CameraManage::Mat2QImage(Mat& cvImg)
{
QImage qImg;
if(cvImg.channels()==3) //3 channels color image
{
cv::cvtColor(cvImg, cvImg, cv::COLOR_BGR2RGB);
qImg =QImage((const unsigned char*)(cvImg.data),
cvImg.cols, cvImg.rows,
cvImg.cols*cvImg.channels(),
QImage::Format_RGB888);
}
else if(cvImg.channels()==1) //grayscale image
{
qImg =QImage((const unsigned char*)(cvImg.data),
cvImg.cols,cvImg.rows,
cvImg.cols*cvImg.channels(),
QImage::Format_Indexed8);
}
else
{
qImg =QImage((const unsigned char*)(cvImg.data),
cvImg.cols,cvImg.rows,
cvImg.cols*cvImg.channels(),
QImage::Format_RGB888);
}
return qImg;
}
QImage CameraManage::ScaleImage2Label(QImage qImage, QLabel* qLabel)
{
QImage qScaledImage;
QSize qImageSize = qImage.size();
QSize qLabelSize = qLabel->size();
double dWidthRatio = 1.0*qImageSize.width() / qLabelSize.width();
double dHeightRatio = 1.0*qImageSize.height() / qLabelSize.height();
if (dWidthRatio>dHeightRatio)
{
qScaledImage = qImage.scaledToWidth(qLabelSize.width());
}
else
{
qScaledImage = qImage.scaledToHeight(qLabelSize.height());
}
return qScaledImage;
}
void CameraManage::on_open_clicked()
{
cap.open(0);
timer->start(33); // start the timer; timeout() fires about every 33 ms (~30 fps)
}
void CameraManage::readFarme()
{
cap >> frame;
if (frame.empty())        // skip until the camera delivers frames
return;
Mat gray, frame1, frame2;
vector<Rect> faces;
cvtColor(frame, gray, COLOR_BGR2GRAY);
cvtColor(frame, frame1, COLOR_BGR2RGB);
QImage img((const uchar*)frame1.data,
frame1.cols, frame1.rows,
frame1.cols * frame1.channels(),
QImage::Format_RGB888);
image = img.copy();   // keep a deep copy so on_takePhono_clicked() has a frame to save
//ui->lable->setPixmap(QPixmap::fromImage(img));
QImage scaleImage = ScaleImage2Label( img, ui->lable );
ui->lable->setPixmap(QPixmap::fromImage(scaleImage));
ui->lable->setAlignment(Qt::AlignCenter);
ui->lable->show();
equalizeHist(gray, gray);
//-- Detect faces
face_cascade.detectMultiScale( gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(60, 60) );
for ( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
}
cvtColor(frame, frame2, COLOR_BGR2RGB);
QImage img1((const uchar*)frame2.data,
frame2.cols, frame2.rows,
frame2.cols * frame2.channels(),
QImage::Format_RGB888);
//ui->lable_2->setPixmap(QPixmap::fromImage(img1));
QImage scaleImage1 = ScaleImage2Label( img1, ui->lable_2 );
ui->lable_2->setPixmap(QPixmap::fromImage(scaleImage1));
ui->lable_2->setAlignment(Qt::AlignCenter);
ui->lable_2->show();
vector< vector<Point2f> > landmarks;
// Run landmark detector
bool success = facemark->fit(frame,faces,landmarks);
if(success)
{
// If successful, render the landmarks on the face
for(int i = 0; i < landmarks.size(); i++)
{
drawLandmarks(frame, landmarks[i]);
drawFacemarks(frame, landmarks[i], Scalar(0, 0, 255));
}
}
imshow("Facial Landmark Detection", frame);
}
void CameraManage::on_close_clicked()
{
timer->stop();    // stop reading frames
cap.release();    // release the camera
frame.release();
ui->lable->setText("video closed");
}
void CameraManage::on_takePhono_clicked()
{
bool b = image.save("01.jpg","JPG");   // save the current frame to an image file
if(!b)
return;
}
void CameraManage::on_faceDete_clicked()
{
Mat image_dete;
const char* filename = "01.jpg";               // image to be detected (give an absolute path here)
image_dete = imread(filename, IMREAD_COLOR);   // load the image
// detectAndDraw( image_dete);
}
/*************************
********* method two ***********
**************************/
void CameraManage::detectAndDraw( Mat& img, CascadeClassifier& cascade,
CascadeClassifier& nestedCascade,
double scale, bool tryflip )
{
double t = 0;
vector<Rect> faces, faces2;
const static Scalar colors[] =
{
Scalar(255,0,0),
Scalar(255,128,0),
Scalar(255,255,0),
Scalar(0,255,0),
Scalar(0,128,255),
Scalar(0,255,255),
Scalar(0,0,255),
Scalar(255,0,255)
};
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
double fx = 1 / scale;
cv::resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR );
equalizeHist( smallImg, smallImg );
t = (double)getTickCount();
cascade.detectMultiScale( smallImg, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
if( tryflip )
{
flip(smallImg, smallImg, 1);
cascade.detectMultiScale( smallImg, faces2,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); ++r )
{
faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
}
}
t = (double)getTickCount() - t;
printf( "detection time = %g ms\n", t*1000/getTickFrequency());
for ( size_t i = 0; i < faces.size(); i++ )
{
Rect r = faces[i];
Mat smallImgROI;
vector<Rect> nestedObjects;
Point center;
Scalar color = colors[i%8];
int radius;
double aspect_ratio = (double)r.width/r.height;
if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
{
center.x = cvRound((r.x + r.width*0.5)*scale);
center.y = cvRound((r.y + r.height*0.5)*scale);
radius = cvRound((r.width + r.height)*0.25*scale);
circle( img, center, radius, color, 3, 8, 0 );
}
else
rectangle( img, cvPoint(cvRound(r.x*scale), cvRound(r.y*scale)),
cvPoint(cvRound((r.x + r.width-1)*scale), cvRound((r.y + r.height-1)*scale)),
color, 3, 8, 0);
if( nestedCascade.empty() )
continue;
smallImgROI = smallImg( r );
nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
//|CASCADE_DO_CANNY_PRUNING
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
for ( size_t j = 0; j < nestedObjects.size(); j++ )
{
Rect nr = nestedObjects[j];
center.x = cvRound((r.x + nr.x + nr.width*0.5)*scale);
center.y = cvRound((r.y + nr.y + nr.height*0.5)*scale);
radius = cvRound((nr.width + nr.height)*0.25*scale);
circle( img, center, radius, color, 3, 8, 0 );
}
}
// imshow( "result", img );
QImage image_de = Mat2QImage( img );
QImage scaleImage = ScaleImage2Label( image_de, ui->lable );
ui->lable->setPixmap(QPixmap::fromImage(scaleImage));
ui->lable->setAlignment(Qt::AlignCenter);
ui->lable->show();
}
void CameraManage::on_videoDete_clicked()
{
// timer_1->start(33);
CascadeClassifier cascade, nestedCascade;
if( !cascade.load(cascade_name) || !nestedCascade.load(cascade_name_class) )
{
fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
return ;
}
capDete.open(0);
bool tryflip = true;
double scale = 1;
if( capDete.isOpened() )
{
for(;;)
{
capDete >> fram_dete;
if( fram_dete.empty() )
break;
Mat frame1 = fram_dete.clone();
detectAndDraw( frame1, cascade, nestedCascade, scale, tryflip );
char c = (char)waitKey(10);
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
}
}
// drawPolyLine draws a poly line by joining
// successive points between the start and end indices.
void CameraManage::drawPolyline
(
Mat &im,
const vector<Point2f> &landmarks,
const int start,
const int end,
bool isClosed
)
{
// Gather all points between the start and end indices
vector <Point> points;
for (int i = start; i <= end; i++)
{
points.push_back(cv::Point(landmarks[i].x, landmarks[i].y));
}
// Draw polylines.
polylines(im, points, isClosed, COLOR, 2, 16);
}
void CameraManage::drawLandmarks(Mat &im, vector<Point2f> &landmarks)
{
// Draw face for the 68-point model.
if (landmarks.size() == 68)
{
drawPolyline(im, landmarks, 0, 16); // Jaw line
drawPolyline(im, landmarks, 17, 21); // Left eyebrow
drawPolyline(im, landmarks, 22, 26); // Right eyebrow
drawPolyline(im, landmarks, 27, 30); // Nose bridge
drawPolyline(im, landmarks, 30, 35, true); // Lower nose
drawPolyline(im, landmarks, 36, 41, true); // Left eye
drawPolyline(im, landmarks, 42, 47, true); // Right Eye
drawPolyline(im, landmarks, 48, 59, true); // Outer lip
drawPolyline(im, landmarks, 60, 67, true); // Inner lip
}
else
{ // If the number of points is not 68, we do not know which
// points correspond to which facial features. So, we draw
// one dot per landmark.
for(int i = 0; i < landmarks.size(); i++)
{
circle(im,landmarks[i],3, COLOR, FILLED);
}
}
}
In the program, lbfmodel.yaml is the trained model for facial landmark extraction, and haarcascade_frontalface_alt.xml is the classifier for face detection. Both files can be downloaded here (Baidu netdisk): https://pan.baidu.com/s/16PZ-McVgRwB3bH1Y2fEWBA  password: x8be
Screenshot of the actual result:
If anything is unclear, you can email me at 920386029@qq.com.