using namespace std;
using namespace cv;
using namespace seeta;
int main()
{
seeta::FaceDetection detector(“model/seeta_fd_frontal_v1.0.bin”);
detector.SetMinFaceSize(40);
detector.SetScoreThresh(2.f);
detector.SetImagePyramidScaleFactor(0.8f);
detector.SetWindowStep(4, 4);
cv::Mat img = cv::imread(“1.jpg”, cv::IMREAD_UNCHANGED);
cv::Mat img_gray;
if (img.channels() != 1)
{
cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);
}
else
{
img_gray = img;
}
seeta::ImageData img_data;
img_data.data = img_gray.data;
img_data.width = img_gray.cols;
img_data.height = img_gray.rows;
img_data.num_channels = 1;
long t0 = cv::getTickCount();
std::vector<seeta::FaceInfo> faces = detector.Detect(img_data);
long t1 = cv::getTickCount();
double secs = (t1 - t0) / cv::getTickFrequency();
cv::Rect face_rect;
int32_t num_face = static_cast<int32_t>(faces.size());
for (int32_t i = 0; i < num_face; i++) {
face_rect.x = faces[i].bbox.x;
face_rect.y = faces[i].bbox.y;
face_rect.width = faces[i].bbox.width;
face_rect.height = faces[i].bbox.height;
cv::rectangle(img, face_rect, CV_RGB(0, 255, 0), 2, 8, 0);
}
cv::namedWindow(“Win7”, cv::WINDOW_AUTOSIZE);
cv::imshow(“Win7”,img);
cv::waitKey(0);
cv::destroyAllWindows();
system(“pause”);
return 0;
}
2. 人脸校正（Face Alignment：检测人脸并定位 5 个面部特征点）
// Example 2: SeetaFace face alignment.
// Detects faces in "000.jpg", locates 5 facial landmarks per face, and draws
// the face boxes and landmark points. Returns 0 on success, 1 on load failure.
#include <stdlib.h>
#include <iostream>
#include <string>
// Forward slashes: backslashes in include paths are non-portable and form
// (invalid) escape sequences on some compilers.
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/core.hpp"
#include "face_detection.h"
#include "face_alignment.h"
// Fixed: the original pragmas used typographic quotes, which MSVC rejects.
#pragma comment(lib,"opencv_highgui330.lib")
#pragma comment(lib,"opencv_imgproc330.lib")
#pragma comment(lib,"opencv_imgcodecs330.lib")
#pragma comment(lib,"opencv_core330.lib")
#pragma comment(lib,"FaceDetection.lib")
#pragma comment(lib,"SeetaFaceAlignment.lib")

int main()
{
	seeta::FaceDetection detector("model/seeta_fd_frontal_v1.0.bin");
	seeta::FaceAlignment point_detector("model/seeta_fa_v1.1.bin");
	detector.SetMinFaceSize(40);               // ignore faces smaller than 40 px
	detector.SetScoreThresh(2.f);              // detection score threshold
	detector.SetImagePyramidScaleFactor(0.8f); // pyramid downscale per level
	detector.SetWindowStep(4, 4);              // sliding-window stride (x, y)

	cv::Mat img = cv::imread("000.jpg", cv::IMREAD_UNCHANGED);
	if (img.empty())
	{
		// Fail fast instead of crashing inside cvtColor on an empty Mat.
		std::cerr << "Failed to load image: 000.jpg" << std::endl;
		return 1;
	}

	// The detector consumes single-channel data; convert only when needed.
	cv::Mat img_gray;
	if (img.channels() != 1)
		cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);
	else
		img_gray = img;

	seeta::ImageData img_data;
	img_data.data = img_gray.data;
	img_data.width = img_gray.cols;
	img_data.height = img_gray.rows;
	img_data.num_channels = 1;

	// cv::getTickCount() returns a 64-bit count; 'long' may truncate on LLP64.
	long long t0 = cv::getTickCount();
	std::vector<seeta::FaceInfo> faces = detector.Detect(img_data);
	long long t1 = cv::getTickCount();
	double secs = static_cast<double>(t1 - t0) / cv::getTickFrequency();
	std::cout << "Detected " << faces.size() << " face(s) in " << secs << " s" << std::endl;

	constexpr int kNumLandmarks = 5; // the v1.1 alignment model outputs 5 points
	seeta::FacialLandmark points[kNumLandmarks];
	for (const seeta::FaceInfo& face : faces) {
		const cv::Rect face_rect(face.bbox.x, face.bbox.y,
		                         face.bbox.width, face.bbox.height);
		cv::rectangle(img, face_rect, CV_RGB(0, 255, 0), 2, 8, 0);
		point_detector.PointDetectLandmarks(img_data, face, points);
		for (int j = 0; j < kNumLandmarks; ++j)
		{
			// cv::Point/cv::FILLED replace the removed C API cvPoint/CV_FILLED.
			cv::circle(img,
			           cv::Point(static_cast<int>(points[j].x),
			                     static_cast<int>(points[j].y)),
			           2, CV_RGB(0, 255, 0), cv::FILLED);
		}
	}

	// The original resized by a factor of 1 (a no-op); removed.
	cv::namedWindow("Win7", cv::WINDOW_AUTOSIZE);
	cv::imshow("Win7", img);
	cv::waitKey(0);
	cv::destroyAllWindows();
	system("pause"); // Windows-only console pause, kept from the original
	return 0;
}
3. 人脸识别（Face Recognition：裁剪人脸、提取特征并计算相似度）
// Example 3: SeetaFace face recognition.
// Detects a face in "Dilraba3.jpg", crops it with the 5 detected landmarks,
// extracts a 2048-d feature, compares it against a probe crop
// ("Dilraba1Crop.jpg"), and prints the cosine-style similarity score.
// Returns 0 on success, 1 on any load/model failure.
#include <stdlib.h>
#include <iostream>
#include <string>
// Forward slashes: backslashes in include paths are non-portable.
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/core.hpp"
#include "face_detection.h"
#include "face_alignment.h"
#include "face_identification.h"
#pragma comment(lib,"opencv_highgui330.lib")
#pragma comment(lib,"opencv_imgproc330.lib")
#pragma comment(lib,"opencv_imgcodecs330.lib")
#pragma comment(lib,"opencv_core330.lib")
#pragma comment(lib,"FaceDetection.lib")
#pragma comment(lib,"SeetaFaceAlignment.lib")
#pragma comment(lib,"FaceIdentification.lib")
#include <vector>

int main()
{
	// Fixed: the original used typographic quotes around every string literal,
	// which does not compile; all literals now use plain ASCII quotes.
	seeta::FaceDetection detector("model/seeta_fd_frontal_v1.0.bin");
	seeta::FaceAlignment point_detector("model/seeta_fa_v1.1.bin");
	seeta::FaceIdentification face_recognizer("model/seeta_fr_v1.0.bin");
	detector.SetMinFaceSize(40);               // ignore faces smaller than 40 px
	detector.SetScoreThresh(2.f);              // detection score threshold
	detector.SetImagePyramidScaleFactor(0.8f); // pyramid downscale per level
	detector.SetWindowStep(4, 4);              // sliding-window stride (x, y)

	cv::Mat img = cv::imread("Dilraba3.jpg", cv::IMREAD_UNCHANGED);
	if (img.empty())
	{
		std::cerr << "Failed to load image: Dilraba3.jpg" << std::endl;
		return 1;
	}

	// Keep an untouched color copy for CropFace: drawing below mutates 'img'.
	cv::Mat _img = img.clone();
	seeta::ImageData src_img_data(_img.cols, _img.rows, _img.channels());
	src_img_data.data = _img.data;

	// The detector consumes single-channel data; convert only when needed.
	cv::Mat img_gray;
	if (img.channels() != 1)
		cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);
	else
		img_gray = img;

	seeta::ImageData img_data;
	img_data.data = img_gray.data;
	img_data.width = img_gray.cols;
	img_data.height = img_gray.rows;
	img_data.num_channels = 1;

	// cv::getTickCount() returns a 64-bit count; 'long' may truncate on LLP64.
	long long t0 = cv::getTickCount();
	std::vector<seeta::FaceInfo> faces = detector.Detect(img_data);
	long long t1 = cv::getTickCount();
	double secs = static_cast<double>(t1 - t0) / cv::getTickFrequency();
	std::cout << "Detected " << faces.size() << " face(s) in " << secs << " s" << std::endl;

	// Fixed: the original fed an UNINITIALIZED landmark array to CropFace when
	// no face was detected (undefined behavior). Bail out early instead.
	if (faces.empty())
	{
		std::cerr << "No face found in the gallery image" << std::endl;
		return 1;
	}

	constexpr int kNumLandmarks = 5; // the v1.1 alignment model outputs 5 points
	seeta::FacialLandmark points[kNumLandmarks];
	for (const seeta::FaceInfo& face : faces) {
		const cv::Rect face_rect(face.bbox.x, face.bbox.y,
		                         face.bbox.width, face.bbox.height);
		cv::rectangle(img, face_rect, CV_RGB(0, 255, 0), 2, 8, 0);
		// NOTE: 'points' is overwritten each iteration, so CropFace below uses
		// the landmarks of the LAST detected face (same as the original code).
		point_detector.PointDetectLandmarks(img_data, face, points);
		for (int j = 0; j < kNumLandmarks; ++j)
		{
			// cv::Point/cv::FILLED replace the removed C API cvPoint/CV_FILLED.
			cv::circle(img,
			           cv::Point(static_cast<int>(points[j].x),
			                     static_cast<int>(points[j].y)),
			           2, CV_RGB(0, 255, 0), cv::FILLED);
		}
	}

	// Crop the aligned face into a recognizer-sized buffer (rows = crop_height).
	cv::Mat dst_img(face_recognizer.crop_height(), face_recognizer.crop_width(),
	                CV_8UC(face_recognizer.crop_channels()));
	seeta::ImageData dst_img_data(dst_img.cols, dst_img.rows, dst_img.channels());
	dst_img_data.data = dst_img.data;
	face_recognizer.CropFace(src_img_data, points, dst_img_data);
	cv::imwrite("Dilraba3Crop.jpg", dst_img);
	cv::imshow("Crop Face", dst_img);

	const int feat_size = face_recognizer.feature_size();
	if (feat_size != 2048) // v1.0 recognition model emits 2048-d features
	{
		std::cerr << "Unexpected feature size: " << feat_size << std::endl;
		return 1;
	}

	cv::Mat src_img = cv::imread("Dilraba1Crop.jpg", cv::IMREAD_COLOR);
	if (src_img.empty())
	{
		std::cerr << "Failed to load image: Dilraba1Crop.jpg" << std::endl;
		return 1;
	}
	cv::imshow("Probe Face", src_img);
	// Fixed: cv::Size takes (width, height); the original swapped them, which
	// produced a wrongly shaped probe whenever crop_width != crop_height.
	cv::resize(src_img, src_img,
	           cv::Size(face_recognizer.crop_width(), face_recognizer.crop_height()));
	seeta::ImageData _src_img_data(src_img.cols, src_img.rows, src_img.channels());
	_src_img_data.data = src_img.data;

	// std::vector replaces the original raw new[]/delete[] pair, which leaked
	// on early returns and was released only AFTER system("pause").
	std::vector<float> gallery_fea(feat_size);
	std::vector<float> probe_fea(feat_size);
	face_recognizer.ExtractFeature(dst_img_data, gallery_fea.data());
	face_recognizer.ExtractFeature(_src_img_data, probe_fea.data());
	float sim = face_recognizer.CalcSimilarity(gallery_fea.data(), probe_fea.data());
	std::cout << sim << std::endl;

	// The original resized by a factor of 1 (a no-op); removed.
	cv::namedWindow("Win7", cv::WINDOW_AUTOSIZE);
	cv::imshow("Win7", img);
	cv::waitKey(0);
	cv::destroyAllWindows();
	system("pause"); // Windows-only console pause, kept from the original
	return 0;
}