Using the CLM framework really comes down to calling the CLM functions, so below is my code for locating the facial landmarks in a photograph; after working through it, the overall flow should be much clearer.
#include "CLM_core.h"
#include <fstream>
#include <iostream>
#include <sstream>
#include <opencv2/videoio/videoio.hpp>
#include <opencv2/videoio/videoio_c.h>
using namespace std;
using namespace cv;
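// Gather the raw command-line arguments into a vector of strings, the form the
// CLMTracker parameter-parsing helpers below expect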
vector<string> get_arguments(int argc, char **argv)
{
    vector<string> arguments;
    for (int i = 0; i < argc; ++i)
    {
        arguments.push_back(string(argv[i]));
    }
    return arguments;
}
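// Overlay the detected landmarks and a head-pose box on the image, and show the
// depth map if one is available. In the CLM framework the detection certainty is
// signed, with lower (more negative) values meaning a more reliable fit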
void image_show(Mat& captured_image, Mat_<float>& depth_image, const CLMTracker::CLM& clm_model, const CLMTracker::CLMParameters& clm_parameters, double fx, double fy, double cx, double cy)
{
    double detection_certainty = clm_model.detection_certainty;
    bool detection_success = clm_model.detection_success;

    // Only visualise when the fit is reliable enough
    double visualisation_boundary = 0.2;
    if (detection_certainty < visualisation_boundary)
    {
        CLMTracker::Draw(captured_image, clm_model);

        // Clamp the certainty and rescale [-1, visualisation_boundary] to roughly [0, 1] for colour mixing
        double vis_certainty = detection_certainty;
        if (vis_certainty > 1)
            vis_certainty = 1;
        if (vis_certainty < -1)
            vis_certainty = -1;
        vis_certainty = (vis_certainty + 1) / (visualisation_boundary + 1);

        // A rough heuristic for the thickness of the box around the face
        int thickness = (int)std::ceil(2.0 * ((double)captured_image.cols) / 640.0);

        Vec6d pose_estimate_to_draw = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);

        // Draw it in reddish if uncertain, blueish if certain
        CLMTracker::DrawBox(captured_image, pose_estimate_to_draw, Scalar((1 - vis_certainty) * 255.0, 0, vis_certainty * 255), thickness, fx, fy, cx, cy);
    }
    if (!clm_parameters.quiet_mode)
    {
        namedWindow("tracking_result", 1);
        imshow("tracking_result", captured_image);
        if (!depth_image.empty())
        {
            // Division needed for visualisation purposes
            imshow("depth", depth_image / 2000.0);
        }
    }
}
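// Entry point: parse the command line, load the CLM model, fill in default
// camera intrinsics where none were given, then detect and visualise the
// facial landmarks in a single image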
int main(int argc, char **argv)
{
    vector<string> arguments = get_arguments(argc, argv);
    vector<string> files, depth_directories, pose_output_files, tracked_videos_output, landmark_output_files, landmark_3D_output_files;
    int device = 0;

    CLMTracker::CLMParameters clm_parameters(arguments);

    // Parse the input image / output file parameters from the command line
    bool use_world_coordinates;
    CLMTracker::get_video_input_output_params(files, depth_directories, pose_output_files, tracked_videos_output, landmark_output_files, landmark_3D_output_files, use_world_coordinates, arguments);

    // Load the CLM model from disk
    CLMTracker::CLM clm_model(clm_parameters.model_location);

    // Camera intrinsics; zero means they were not specified on the command line
    float fx = 0, fy = 0, cx = 0, cy = 0;
    CLMTracker::get_camera_params(device, fx, fy, cx, cy, arguments);
    bool cx_undefined = false;
    bool fx_undefined = false;
    if (cx == 0 || cy == 0)
    {
        cx_undefined = true;
    }
    if (fx == 0 || fy == 0)
    {
        fx_undefined = true;
    }
    // Read the input image and bail out early if it could not be loaded,
    // since the intrinsic defaults below need its dimensions
    Mat src = imread(files[0]);
    if (src.empty())
    {
        cout << "Could not read the image " << files[0] << endl;
        return -1;
    }
    imshow("image", src);

    // If the optical centre is undefined, use the image centre
    if (cx_undefined)
    {
        cx = src.cols / 2.0f;
        cy = src.rows / 2.0f;
    }
    // If the focal length is undefined, use a rough guess scaled to the image size
    if (fx_undefined)
    {
        fx = 500 * (src.cols / 640.0);
        fy = 500 * (src.rows / 480.0);
        fx = (fx + fy) / 2.0;
        fy = fx;
    }
    Mat_<float> depth_image;
    Mat_<uchar> grayscale_image;
    if (src.channels() == 3)
    {
        cvtColor(src, grayscale_image, CV_BGR2GRAY);
    }
    else
    {
        grayscale_image = src.clone();
    }

    // Detect and track the facial landmarks. DetectLandmarksInVideo keeps tracking
    // state between calls; for one-off photographs the framework also offers
    // DetectLandmarksInImage, which runs its own face detection first
    bool detection_success = CLMTracker::DetectLandmarksInVideo(grayscale_image, depth_image, clm_model, clm_parameters);

    // Work out the pose of the head from the tracked model
    Vec6d pose_estimate_CLM;
    if (use_world_coordinates)
    {
        pose_estimate_CLM = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
    }
    else
    {
        pose_estimate_CLM = CLMTracker::GetCorrectedPoseCamera(clm_model, fx, fy, cx, cy);
    }

    // Visualise the result: show the facial landmarks and the head-pose bounding box
    image_show(src, depth_image, clm_model, clm_parameters, fx, fy, cx, cy);
    waitKey(0);
    return 0;
}
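The rendered overlay is often not enough; you usually want the landmark coordinates themselves, and they live in clm_model.detected_landmarks. Below is a minimal sketch of a helper, written under the assumption (taken from how the framework's own Draw routine reads the matrix) that detected_landmarks is a 2n x 1 matrix of doubles holding all x coordinates followed by all y coordinates; check this against your version of the framework before relying on it.
// Hypothetical helper, not part of the framework: print the 2D landmark
// positions after a successful detection. Assumes detected_landmarks is a
// (2n x 1) Mat_<double>, x coordinates first, then y coordinates
void print_landmarks(const CLMTracker::CLM& clm_model)
{
    int n = clm_model.detected_landmarks.rows / 2;
    for (int i = 0; i < n; ++i)
    {
        double x = clm_model.detected_landmarks.at<double>(i);
        double y = clm_model.detected_landmarks.at<double>(i + n);
        cout << "landmark " << i << ": (" << x << ", " << y << ")" << endl;
    }
}
Calling print_landmarks(clm_model) right after DetectLandmarksInVideo would dump every point, 68 of them with the common 68-point model.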