1. Timing utility class (timestramp.h)
#ifndef TIMESTRAMP_H
#define TIMESTRAMP_H
#include <stdio.h>
#include <iostream>
#ifndef _WIN32
#include <sys/time.h>
#else
#include <windows.h>
#endif
using namespace std;
// Debug-print helper: writes "[<date>] data:<str>" to stdout.
// NOTE(review): __DATE__ expands to the *compilation* date, not the current
// date at run time — presumably a runtime timestamp was intended; confirm.
// (Comments stay above the macro: a // inside the continuation line would
// swallow the rest of the definition.)
#define TIME_DEBUG(str) \
cout << "[" << __DATE__ "] data:" << str << endl
#ifndef _WIN32
// Scope-based (RAII) stopwatch: records the wall-clock time on construction
// and prints the elapsed interval, in milliseconds, when it goes out of scope.
class timestramp{
public:
timestramp()
{
    gettimeofday(&tpstart,NULL);
}
~timestramp()
{
    gettimeofday(&tpend,NULL);
    // Both the seconds and the microseconds fields contribute to the elapsed
    // time.  Do the conversion in floating point: the original expression
    // performed integer division by 1000 first, which truncated away all
    // sub-millisecond precision before the assignment to double.
    timeuse = (1000000.0*(tpend.tv_sec-tpstart.tv_sec)
               + (tpend.tv_usec-tpstart.tv_usec)) / 1000.0;
    printf("used time:%fms\n",timeuse);
}
private:
struct timeval tpstart,tpend;  // start/end samples from gettimeofday()
double timeuse;                // elapsed time in milliseconds
};
#else
// Windows variant of the scope-based stopwatch: uses the high-resolution
// performance counter and prints the elapsed milliseconds on destruction
// via the TIME_DEBUG macro.
class timestramp{
private:
    LARGE_INTEGER m_litmp;          // scratch for QueryPerformance* calls
    LONGLONG QPart2;                // counter reading at destruction
    LONGLONG QPart1;                // counter reading at construction
    double dfMinus, dfFreq, dfTim;  // tick delta, ticks/second, elapsed ms
public:
    timestramp(){
        // The counter frequency is fixed after boot, so one query suffices.
        QueryPerformanceFrequency(&m_litmp);
        dfFreq = (double)m_litmp.QuadPart;
        QueryPerformanceCounter(&m_litmp);
        QPart1 = m_litmp.QuadPart;
    }
    ~timestramp(){
        QueryPerformanceCounter(&m_litmp);
        QPart2 = m_litmp.QuadPart;
        dfMinus = (double)(QPart2 - QPart1);
        dfTim = dfMinus / dfFreq * 1000;  // ticks -> milliseconds
        // Format and display the elapsed time.  snprintf replaces the
        // original sprintf: "%.6lf" of a large double can exceed the old
        // 20-byte buffer and overflow it; snprintf truncates safely.
        char strTime[32] = "";
        snprintf(strTime, sizeof(strTime), "%.6lf", dfTim);
        std::string msg = "time:";
        msg += strTime;
        msg += "ms";
        TIME_DEBUG(msg.c_str());
    }
};
#endif
#endif // TIMESTRAMP_H
2. Face detection (dlib facial-landmark demo)
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing/render_face_detections.h>
#include <dlib/image_processing.h>
#include <dlib/gui_widgets.h>
#include <dlib/image_io.h>
#include <iostream>
#include "timestramp.h"
#include <dlib/opencv.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2\opencv.hpp>
using namespace dlib;
using namespace std;
//using namespace cv;
//resize image
// Return a copy of pSrc scaled by dScale in both dimensions.
// An empty source or a non-positive scale yields an empty cv::Mat instead of
// letting cv::resize throw.
cv::Mat MyResizeImage(cv::Mat pSrc, double dScale)
{
    if (pSrc.empty() || dScale <= 0.0)
        return cv::Mat();
    // Let cv::resize allocate the destination.  The original pre-allocated a
    // CV_32S Mat, but resize reallocates the output with the *source* type
    // anyway, so that allocation was wasted and its type misleading.
    cv::Mat pDes;
    cv::resize(pSrc, pDes, cv::Size(), dScale, dScale);
    return pDes;
}
// ----------------------------------------------------------------------------------------
int main(int argc, char** argv)
{
try
{
if (argc == 1)
{
cout << "Call this program like this:" << endl;
cout << "./face_landmark_detection_ex shape_predictor_68_face_landmarks.dat faces/*.jpg" << endl;
cout << "\nYou can get the shape_predictor_68_face_landmarks.dat file from:\n";
cout << "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2" << endl;
return 0;
}
frontal_face_detector detector = get_frontal_face_detector();
shape_predictor sp;
deserialize(argv[1]) >> sp;
image_window win, win_faces;
cv::VideoCapture cap("E:\\code\\alcode\\dlib-19.15\\examples\\Webcam_pose\\video.mp4");
if (!cap.isOpened())
{
cerr << "Unable to connect to camera" << endl;
return 1;
}
#if 1
while (1)
{
timestramp tp;
// Grab a frame
cv::Mat temp;
if (!cap.read(temp))
{
cap.set(CV_CAP_PROP_POS_FRAMES, 0);
continue;
//break;
}
cv::Mat pDes = MyResizeImage(temp, 0.4);
//cout << "processing image " << argv[i] << endl;
//cv_image<bgr_pixel> cimg(temp);
array2d<rgb_pixel> img;
dlib::assign_image(img, dlib::cv_image<dlib::bgr_pixel>(pDes));
//load_image(img, argv[i]);
// Make the image larger so we can detect small faces.
pyramid_up(img);
// Now tell the face detector to give us a list of bounding boxes
// around all the faces in the image.
std::vector<rectangle> dets = detector(img);
cout << "Number of faces detected: " << dets.size() << endl;
// Now we will go ask the shape_predictor to tell us the pose of
// each face we detected.
std::vector<full_object_detection> shapes;
for (unsigned long j = 0; j < dets.size(); ++j)
{
full_object_detection shape = sp(img, dets[j]);
cout << "number of parts: " << shape.num_parts() << endl;
cout << "pixel position of first part: " << shape.part(0) << endl;
cout << "pixel position of second part: " << shape.part(1) << endl;
// You get the idea, you can get all the face part locations if
// you want them. Here we just store them in shapes so we can
// put them on the screen.
shapes.push_back(shape);
}
// Now let's view our face poses on the screen.
win.clear_overlay();
win.set_image(img);
win.add_overlay(render_face_detections(shapes));
#if 0
// We can also extract copies of each face that are cropped, rotated upright,
// and scaled to a standard size as shown here:
dlib::array<array2d<rgb_pixel> > face_chips;
extract_image_chips(img, get_face_chip_details(shapes), face_chips);
win_faces.set_image(tile_images(face_chips));
cout << "Hit enter to process the next image..." << endl;
#endif
}
#endif
}
catch (exception& e)
{
cout << "\nexception thrown!" << endl;
cout << e.what() << endl;
}
}