#include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>
#include "time.h"
#include <dlib/opencv.h>
#include <opencv2/highgui/highgui.hpp>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing/render_face_detections.h>
#include <dlib/image_processing.h>
#include <dlib/gui_widgets.h>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace cv::dnn;
using namespace std;
using namespace dlib;
// SSD face-detector input geometry and preprocessing constants.
// The res10 Caffe model expects a 300x300 input.
const size_t inWidth = 300;
const size_t inHeight = 300;
const double inScaleFactor = 1.0; // pixel scale factor (model expects raw 0-255 values)
const Scalar meanVal(104.0, 177.0, 123.0); // per-channel BGR mean subtracted by blobFromImage
int main( )
{
float min_confidence = 0.5;
String modelConfiguration = "/home/xxp/Desktop/face/deploy.prototxt";
String modelBinary = "/home/xxp/Desktop/face/res10_300x300_ssd_iter_140000.caffemodel";
dnn::Net net = readNetFromCaffe(modelConfiguration, modelBinary);
shape_predictor pos_modle;
deserialize("/home/xxp/Desktop/face/shape_predictor_68_face_landmarks.dat") >> pos_modle;
VideoCapture cap(0);
for (;;)
{
Mat frame;
cap >> frame;
Mat inputBlob = blobFromImage(frame, inScaleFactor,
Size(inWidth, inHeight), meanVal, false, false);
net.setInput(inputBlob, "data");
Mat detection = net.forward("detection_out");
Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
float confidenceThreshold = min_confidence;
for (int i = 0; i < detectionMat.rows; i++)
{
//分类精度
float confidence = detectionMat.at<float>(i, 2);
if (confidence > confidenceThreshold)
{ //左下 右上
int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);
cv::Rect object((int)xLeftBottom, (int)yLeftBottom, (int)(xRightTop - xLeftBottom),
(int)(yRightTop - yLeftBottom));
cv::rectangle(frame, object, Scalar(0, 255, 0),8);
dlib::rectangle dlibRect((int)xLeftBottom, (int)yLeftBottom, (int)(xRightTop - xLeftBottom),
(int)(yRightTop - yLeftBottom));
cv::cvtColor(frame,frame,cv::COLOR_BGR2GRAY);
//dlib::cv_image<unsigned char> cimg(frame);
dlib::full_object_detection shape = pos_modle(dlib::cv_image<uchar>(frame), dlibRect);
//std::vector<dlib::rectangle> faces = detector(frame);
std::vector<full_object_detection> shapes;
shapes.push_back(shape);
if (!shapes.empty())
{
int faceNumber = shapes.size();
for (int j = 0; j < faceNumber; j++)
{
for (int i = 0; i < 68; i++)
{
cv::circle(frame, cvPoint(shapes[j].part(i).x(), shapes[j].part(i).y()), 1, cv::Scalar(0, 0, 255), -1);
//cv::putText(temp,to_string(i), cvPoint(shapes[0].part(i).x(), shapes[0].part(i).y()), CV_FONT_HERSHEY_PLAIN,1, cv::Scalar(0, 0, 255));
//cout << shapes[0].part(38).y() <<" "<< shapes[0].part(40).y()<<endl;
}
}
}
}
}
cv::namedWindow("detections", 0);
cv::imshow("detections", frame);
if (waitKey(1) >= 0) break;
}
return 0;
}
CMakeLists.txt (build configuration for the example above):
# Project setup: minimum CMake version and the bundled dlib source tree
# (built out-of-tree into dlib_build, exporting the dlib::dlib target).
cmake_minimum_required(VERSION 2.8.12)
project(examples)
add_subdirectory(/usr/local/arm/opencv-3.4.5/dlib dlib_build)
# Helper: compile <name>.cpp into an executable of the same name,
# linked against the dlib target built above.
macro(add_example name)
add_executable(${name} ${name}.cpp)
target_link_libraries(${name} dlib::dlib )
endmacro()
# if an example requires GUI, call this macro to check DLIB_NO_GUI_SUPPORT to include or exclude
# Helper: like add_example, but skipped (with a notice) when dlib was
# configured without GUI support.
macro(add_gui_example name)
if (DLIB_NO_GUI_SUPPORT)
message("No GUI support, so we won't build the ${name} example.")
else()
add_example(${name})
endif()
endmacro()
# Build the `camera` example (the face-landmark demo above), which needs
# both GUI support and OpenCV. The skip messages previously referred to
# "webcam_face_pose_ex" — leftover from the dlib examples this was copied
# from — and are corrected to name the target actually built here.
if (DLIB_NO_GUI_SUPPORT)
message("No GUI support, so we won't build the camera example.")
else()
find_package(OpenCV QUIET)
if (OpenCV_FOUND)
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(camera camera.cpp)
target_link_libraries(camera dlib::dlib ${OpenCV_LIBS} )
else()
message("OpenCV not found, so we won't build the camera example.")
endif()
endif()
#here we apply our macros
# Optional example: only built when dlib was configured with SQLite3 support.
if (DLIB_LINK_WITH_SQLITE3)
add_example(sqlite_ex)
endif()