SeetaFace6 tutorial: a wrapper for face recognition, face detection, eye-state detection, quality assessment, feature extraction, and more

SeetaFace is a project developed by SeetaTech (中科视拓). SeetaFace2 is open source and can be downloaded and built yourself; I have not used it, so I cannot say much about it. SeetaFace6 came out at the end of March this year. It is not open source, but it is free for commercial use, its features are quite good, it does not depend on any other library, and it works well together with OpenCV. The main features of the project are:
1. Face detection
2. Landmark extraction
3. Face recognition
4. Eye-state detection
5. Liveness (anti-spoofing) detection
6. Face tracking
7. Quality assessment

 

I have wrapped most of its functionality into a single header file. Pay attention to the model file paths used in the wrapper: I keep all of the model files in a model folder under the same directory; the expected layout is shown below.
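
For reference, these are the model files the wrapper expects to find under ./model/ (exactly the paths hard-coded in the header below; depending on which SeetaFace6 model packages you downloaded you may have additional .csta files, which this wrapper simply does not use):

model/
├── face_detector.csta
├── face_landmarker_pts5.csta
├── face_landmarker_mask_pts5.csta
├── face_recognizer.csta
├── eye_state.csta
├── fas_first.csta
└── fas_second.csta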

I find this project very pleasant to use. I work with Visual Studio 2017; for setting up the environment you can follow any of the usual guides on configuring OpenCV with VS2017, which a quick search will turn up.

The wrapper is quite compact and covers most of the main functionality. Without further ado, here is the code.

PS: Don't worry if the wrapper code is hard to follow. I have already added plenty of comments, and at the end of the article there is test code that exercises the wrapper and is easy to understand.

PS2: Make sure the model paths in the wrapper code are correct, otherwise it will fail.

#pragma once
#include<opencv2/opencv.hpp>
#include<memory>
#include<iostream>

#include<seeta/FaceDetector.h>//face detector
#include<seeta/FaceLandmarker.h>//landmark extraction
#include<seeta/Common/Struct.h>
#include<seeta/FaceRecognizer.h>//face recognition
#include<seeta/EyeStateDetector.h>//eye state
#include<seeta/FaceAntiSpoofing.h>//liveness (anti-spoofing) detection
#include<seeta/FaceTracker.h>//face tracking

#include<seeta/QualityOfBrightness.h>//brightness assessment: checks whether the face brightness is normal; too bright or too dark is rated LOW (levels: 0,1,2)
#include<seeta/QualityOfClarity.h>//clarity assessment
#include<seeta/QualityOfIntegrity.h>//integrity assessment: checks whether the face is fully inside the frame
#include<seeta/QualityOfPose.h>//pose assessment: uses the 5 landmark points to judge whether the face is frontal
#include<seeta/QualityOfResolution.h>//resolution assessment
#include<seeta/QualityStructure.h>

//=============================== addition: a no-mask quality rule (checks whether the face is wearing a mask)
namespace seeta {
	class QualityOfNoMask : public QualityRule {
	public:
		QualityOfNoMask() {
			m_marker = std::make_shared<seeta::FaceLandmarker>(ModelSetting("./model/face_landmarker_mask_pts5.csta"));
		}
		QualityResult check(const SeetaImageData &image, const SeetaRect &face, const SeetaPointF *points, int32_t N) override {
			auto mask_points = m_marker->mark_v2(image, face);
			int mask_count = 0;
			for (auto point : mask_points) {
				if (point.mask) mask_count++;
			}
			QualityResult result;
			if (mask_count > 0) {
				return { QualityLevel::LOW, 1 - float(mask_count) / mask_points.size() };
			}
			else {
				return { QualityLevel::HIGH, 1 };
			}
		}
	private:
		std::shared_ptr<seeta::FaceLandmarker> m_marker;
	};
}



/*
Every model must be initialized before use, otherwise the corresponding detector pointer stays empty (null).
*/
namespace seeta_pacakge {
	//====================================================
#define  Brightness 0
#define  Clarity 1
#define  Integrity 2
#define  Pose 3
#define Resolution 4
#define NoMask 5

//====================================================================================================== global detector pointers
	seeta::FaceLandmarker *facelandmarker;
	seeta::FaceDetector *facedector;
	seeta::FaceRecognizer *facerecognizer;
	seeta::EyeStateDetector *eyestatedector;
	seeta::FaceAntiSpoofing *faceantspoofing;
	seeta::FaceTracker *facetracker;
	seeta::QualityRule *qualityrelu;


	//======================================================================================================
	const char* FaceDector_path = "./model/face_detector.csta";//face detection model
	const char* FaceLandmarker_path = "./model/face_landmarker_pts5.csta";//5-point landmark model
	const char* FaceRecognizer_path = "./model/face_recognizer.csta";//face feature extraction and comparison model
	const char* EyeStateDetector_path = "./model/eye_state.csta";//eye-state detection model
	const char* fasfirst_path = "./model/fas_first.csta";//local (single-frame) anti-spoofing model
	const char* fassecond_path = "./model/fas_second.csta";//global anti-spoofing model
	const char* face_landmarker_mask_pts5_path = "./model/face_landmarker_mask_pts5.csta";//mask-aware 5-point landmark model
	//=============================================================================================================== face recognition =
	void facerecognizer_init()
	{
		seeta::ModelSetting setting;
		setting.append(FaceRecognizer_path);
		facerecognizer = new seeta::FaceRecognizer(setting);

	}
	
	//extract the feature vector of a face in an image
	float* extract_feature(const SeetaImageData& simg, std::vector<SeetaPointF> faces)
	{
		if (facerecognizer == NULL)
		{
			std::cout << "dont init facerecongizer";
			throw 1;
		}
		SeetaPointF points[5];
		for (int i = 0; i < 5; i++)
		{
			points[i] = faces.at(i);
			//	std::cout << points[i].x << "," << points[i].y << std::endl;
		}
		float* feature = new float[facerecognizer->GetExtractFeatureSize()];
		facerecognizer->Extract(simg, points, feature);
		return feature;

	}
//compare two feature vectors and return their similarity score
	float compare(float* feature1, float* feature2)
	{
		return facerecognizer->CalculateSimilarity(feature1, feature2);
	}





	//==================================================================================== face landmark extraction ===================
	void facelandmarker_init()
	{
		seeta::ModelSetting setting;
		setting.append(FaceLandmarker_path);
		facelandmarker = new seeta::FaceLandmarker(setting);
	}


//extract the 5 facial landmark points inside a detected face rectangle
	std::vector<SeetaPointF> mark(const SeetaImageData& image, const SeetaRect& face)
	{
		if (facelandmarker == NULL)
		{
			std::cout << "dont init facelandmarker";
			throw 1;
		}
		return facelandmarker->mark(image, face);
		//the 5 detected points are, in order: left eye center, right eye center, nose tip, left mouth corner, right mouth corner
	}
	//================================================================================================== face detection (base) ======

	void facedector_init()
	{
		seeta::ModelSetting setting;
		setting.append(FaceDector_path);
		facedector = new seeta::FaceDetector(setting);
	}
	//detect faces and return them as an array
	SeetaFaceInfoArray detect(const SeetaImageData &image)
	{
		if (facedector == NULL)
		{
			std::cout << "dont init facedector";
			throw 1;
		}
		return facedector->detect(image);
	}
//move the largest face to the front of the array (partial_sort only guarantees the first element is the largest)
	void sort(SeetaFaceInfoArray face_sfia)
	{
		int m = face_sfia.size;
		std::vector<SeetaFaceInfo> faces(m);
		for (int i = 0; i < face_sfia.size; i++)
		{
			faces.at(i) = face_sfia.data[i];
		}
		std::partial_sort(faces.begin(), faces.begin() + 1, faces.end(), [](SeetaFaceInfo a, SeetaFaceInfo b) {
			return a.pos.width > b.pos.width;
		});
		for (int i = 0; i < face_sfia.size; i++)
		{
			face_sfia.data[i] = faces.at(i);
		}
	}


	//========================================================== eye-state detection

	const char *EYE_STATE_STR[] = { "close", "open", "random", "unknown" };

	void eyestatedector_init()
	{
		seeta::ModelSetting setting;
		setting.append(EyeStateDetector_path);
		eyestatedector = new seeta::EyeStateDetector(setting);
	}

	int* eye_state(const SeetaImageData &img, const std::vector<SeetaPointF> &points)
	{

		if (eyestatedector == NULL)
		{
			std::cout << "eyestatedector dont init";
			throw 1;
		}
		seeta::EyeStateDetector::EYE_STATE left_eye, right_eye;
		static int status[2];//static so the returned pointer stays valid after the function returns
		eyestatedector->Detect(img, points.data(), left_eye, right_eye);
		status[0] = left_eye;
		status[1] = right_eye;
		return status;
	}
	//============================== liveness (anti-spoofing) detection ==================================================================
	const char *SPOOF_STATE_STR[] = { "real face",
	"spoof face",
	"unknown",
	"judging" };



	//version: 0 = local model only
	//         1 = global model only
	//         2 = local + global
	void faceantspoofing_init(int version = 0)
	{
		seeta::ModelSetting setting;
		switch (version)
		{
		case 0:
			setting.append(fasfirst_path);
			break;
		case 1:
			setting.append(fassecond_path);
			break;
		case 2:
			setting.append(fasfirst_path);
			setting.append(fassecond_path);
			break;
		default:
			std::cout << "version input error";
			throw 2;
		}

		faceantspoofing = new seeta::FaceAntiSpoofing(setting);
	}

	int predict(const SeetaImageData &image,
		const SeetaRect &face,
		std::vector<SeetaPointF> v_points,
		int way = 0)//way: 0 = single-frame prediction, 1 = video (multi-frame) prediction
	{

		if (faceantspoofing == NULL)
		{
			std::cout << "faceantspoofing dont init";
			throw 1;
		}

		SeetaPointF points[5];
		for (int i = 0; i < 5; i++)
		{
			points[i] = v_points.at(i);

		}

		int status;
		switch (way)
		{
		case 0:
			status = faceantspoofing->Predict(image, face, points);
			break;
		case 1:
			status = faceantspoofing->PredictVideo(image, face, points);
			break;
		default:
			std::cout << "way input error";
			throw 2;

		}
		switch (status) {//print a human-readable result for the status obtained above
		case seeta::FaceAntiSpoofing::REAL:
			std::cout << "real face" << std::endl; break;
		case seeta::FaceAntiSpoofing::SPOOF:
			std::cout << "spoof face" << std::endl; break;
		case seeta::FaceAntiSpoofing::FUZZY:
			std::cout << "cannot decide" << std::endl; break;
		case seeta::FaceAntiSpoofing::DETECTING:
			std::cout << "still detecting" << std::endl; break;
		}
		return status;
	}

	void reset_video() {
		faceantspoofing->ResetVideo();
	}
	void set_frame(int32_t number)
	{
		faceantspoofing->SetVideoFrameCount(number);//the default is 10

	}

	//======================================================================================================= cv::Mat -> SeetaImageData
	//wrap a cv::Mat as a SeetaImageData without copying: the Mat must stay alive (and unchanged) while the SeetaImageData is in use
	SeetaImageData mat_to_seetaImageData(cv::Mat& m)
	{
		SeetaImageData sid;
		sid.data = m.data;
		sid.channels = m.channels();
		sid.height = m.rows;
		sid.width = m.cols;
		return sid;

	}
	//======================================= face tracking =====================================
	void facetracker_init(int width, int height)
	{
		seeta::ModelSetting setting;
		setting.append(FaceDector_path);
		facetracker = new seeta::FaceTracker(setting, width, height);
	}

	std::vector<SeetaTrackingFaceInfo> tracker(const SeetaImageData &image)
	{
		if (facetracker == NULL)
		{
			std::cout << "facetracker dont init";
			throw 1;
		}
		SeetaTrackingFaceInfoArray cfaces = facetracker->Track(image);
		std::vector<SeetaTrackingFaceInfo> faces(cfaces.data, cfaces.data + cfaces.size);
		return faces;
	}
	//======================== quality assessment ==================================================
	void qualityrelu_init(int choose)
	{
		switch (choose)
		{
		case Brightness: qualityrelu = new seeta::QualityOfBrightness(); break;//brightness
		case Clarity: qualityrelu = new seeta::QualityOfClarity(); break;//clarity
		case Integrity: qualityrelu = new seeta::QualityOfIntegrity(); break;//integrity
		case Pose: qualityrelu = new seeta::QualityOfPose(); break;//pose
		case Resolution: qualityrelu = new seeta::QualityOfResolution(); break;//resolution
		case NoMask: qualityrelu = new seeta::QualityOfNoMask(); break;//no-mask check
		}
	}
	const char *level_string[] = { "LOW", "MEDIUM", "HIGH" };
	int plot_quality(const SeetaImageData &image,
		const SeetaRect &face,
		const std::vector < SeetaPointF>& points)
	{
		seeta::QualityResult result = qualityrelu->check(image, face, points.data(), int(points.size()));
		return result.level;
	}
}
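
One detail worth pointing out before the tests: extract_feature returns a buffer allocated with new float[GetExtractFeatureSize()], so the caller owns that memory and should release it with delete[] when it is no longer needed (the test code below skips this for brevity). A minimal sketch, assuming the header above is included; the helper name compare_and_free is mine, not part of the wrapper:

// hedged sketch: compare two already-detected faces and release the feature buffers afterwards
float compare_and_free(const SeetaImageData& imgA, const std::vector<SeetaPointF>& pointsA,
	const SeetaImageData& imgB, const std::vector<SeetaPointF>& pointsB)
{
	float* f1 = seeta_pacakge::extract_feature(imgA, pointsA);
	float* f2 = seeta_pacakge::extract_feature(imgB, pointsB);
	float score = seeta_pacakge::compare(f1, f2);
	delete[] f1;//extract_feature allocates with new float[], so the caller must delete[]
	delete[] f2;
	return score;
}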

Of course, I tested every function while writing this wrapper. Here is the test code.

#include"seeta_own.h"
#include<Windows.h>

using namespace std;
using namespace cv;
using namespace seeta_pacakge;
//open E:/source/test.jpg, detect the face and display it
//open E:/source/test2.jpg, detect its face and match it against the face from test.jpg
//finally print the similarity of the two faces; note that if a photo contains several faces, only the largest one is used here
void test1()
{
	Mat m = imread("E:/source/test.jpg");

	facedector_init();
	facelandmarker_init();
	facerecognizer_init();

	SeetaImageData sid = mat_to_seetaImageData(m);
	SeetaFaceInfoArray sfia = detect(sid);
	sort(sfia);
	if (sfia.size <= 0)
	{
		cout << "no face detected";
		system("pause");
		return;
	}
	vector<SeetaPointF> spf = mark(sid, sfia.data[0].pos);

	rectangle(m, Rect(sfia.data[0].pos.x, sfia.data[0].pos.y, sfia.data[0].pos.width, sfia.data[0].pos.height), Scalar(0, 0, 255));
	for (int i = 0; i < 5; i++)
	{
		circle(m, Point(spf.at(i).x, spf.at(i).y), 5, Scalar(0, 255, 0));
	}
	imshow("1", m);

	float* feature = extract_feature(sid, spf);


	Mat m2 = imread("E:/source/test2.jpg");
	SeetaImageData sid2 = mat_to_seetaImageData(m2);
	SeetaFaceInfoArray sfia2 = detect(sid2);
	sort(sfia2);
	vector<SeetaPointF> spf2 = mark(sid2, sfia2.data[0].pos);


	float* feature2 = extract_feature(sid2, spf2);
	cout << "大小" << facerecognizer->GetExtractFeatureSize();
	std::cout << "相似度" << facerecognizer->CalculateSimilarity(feature, feature2) << std::endl;
	for (int j = 0; j < 2048; j++)
		std::cout << j << "...." << feature2[j] << std::endl;


	float* f = new float[facerecognizer->GetExtractFeatureSize()];
	for (int o = 0; o < facerecognizer->GetExtractFeatureSize(); o++)
		f[o] = feature[o];

	std::cout << "2相似度" << compare(f, feature2);
	waitKey(0);
}
//open the camera, detect the eye state of the face in each frame, display the frame and print the result
void test2()
{
	facedector_init();
	facelandmarker_init();
	eyestatedector_init();

	VideoCapture videocapture(0);
	Mat mat;
	while (videocapture.isOpened())
	{
		videocapture.read(mat);
		flip(mat, mat,1);
		SeetaImageData sid=mat_to_seetaImageData(mat);
		SeetaFaceInfoArray sfia=detect(sid);
		sort(sfia);
		vector<SeetaPointF> spf  =mark(sid, sfia.data[0].pos);
		int* eye=eye_state(sid, spf);
		cout << "左眼:" << eye[0] << EYE_STATE_STR[eye[0]]
			<< "   "<< "右眼:" << eye[1] << EYE_STATE_STR[eye[1]]<<std::endl;
		imshow("1", mat);
		waitKey(1);

	}
}
//read a reference image (test2.jpg below), open the camera, compare each camera face against the reference face, and print the similarity in the console
void test3()
{
	facedector_init();
	facelandmarker_init();
	eyestatedector_init();
	facerecognizer_init();

	Mat ori = imread("E:/source/test2.jpg");
	SeetaImageData sid_ori = mat_to_seetaImageData(ori);
	SeetaFaceInfoArray sfia_ori = detect(sid_ori);
	sort(sfia_ori);
	vector<SeetaPointF> spf2=mark(sid_ori, sfia_ori.data[0].pos);
	float* ori_ss = extract_feature(sid_ori, spf2);

	VideoCapture videocapture(0);
	Mat mat;
	while (videocapture.isOpened())
	{
		videocapture.read(mat);
		flip(mat, mat, 1);
		SeetaImageData sid = mat_to_seetaImageData(mat);
		SeetaFaceInfoArray sfia = detect(sid);
		sort(sfia);
		vector<SeetaPointF> spf = mark(sid, sfia.data[0].pos);
		float* curr=extract_feature(sid, spf);
		std::cout << compare(curr, ori_ss) << std::endl;
		imshow("1", mat);
		waitKey(1);


	}
}

//the remaining tests you can read on your own: liveness detection, quality assessment, and so on

void test4()
{
	facedector_init();
	facelandmarker_init();
	eyestatedector_init();
	facerecognizer_init();
	faceantspoofing_init();
	VideoCapture videocapture(0);
	Mat mat;
	while (videocapture.isOpened())
	{
		videocapture.read(mat);
		flip(mat, mat, 1);
		SeetaImageData sid = mat_to_seetaImageData(mat);
		SeetaFaceInfoArray sfia = detect(sid);
		sort(sfia);
		vector<SeetaPointF> spf = mark(sid, sfia.data[0].pos);

		int status=predict(sid, sfia.data[0].pos, spf,1);
		cout << status << SPOOF_STATE_STR[status] << std::endl;
		imshow("1", mat);
		waitKey(1);
	}

}
void test5()
{
	facedector_init();
	facelandmarker_init();
	eyestatedector_init();
	facerecognizer_init();
	faceantspoofing_init();
	
	VideoCapture videocapture(0);
	facetracker_init(videocapture.get(CAP_PROP_FRAME_WIDTH),videocapture.get(CAP_PROP_FRAME_HEIGHT));
	Mat mat;
	while (videocapture.isOpened())
	{
		videocapture.read(mat);
		flip(mat, mat, 1);
		SeetaImageData sid = mat_to_seetaImageData(mat);
		auto tra=tracker(sid);
		for (int i = 0; i < tra.size(); i++)
		{
			SeetaTrackingFaceInfo id=tra.at(i);
			rectangle(mat, Rect(id.pos.x, id.pos.y, id.pos.width, id.pos.height), Scalar(255, 0, 0));
			putText(mat, to_string(id.PID), Point(id.pos.x, id.pos.y), FONT_HERSHEY_SIMPLEX, 1, Scalar(255, 23, 0), 4, 8);
		}
		imshow("1", mat);
		waitKey(1);

	}
}
void test6()
{
	facedector_init();
	facelandmarker_init();
	eyestatedector_init();
	facerecognizer_init();
	faceantspoofing_init();
	qualityrelu_init(NoMask);
	VideoCapture videocapture(0);
	Mat mat;
	while (videocapture.isOpened())
	{
		videocapture.read(mat);
		flip(mat, mat, 1);
		SeetaImageData sid = mat_to_seetaImageData(mat);
		SeetaFaceInfoArray sfia = detect(sid);
		sort(sfia);
		vector<SeetaPointF> spf = mark(sid, sfia.data[0].pos);
		imshow("1", mat);
		waitKey(1);
		int status=plot_quality(sid, sfia.data[0].pos, spf);
		std::cout << level_string[status] << std::endl;
	}
}
int main()
{
	test6();
}
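
One last practical note: the tests only print the raw similarity returned by compare / CalculateSimilarity. To decide whether two faces belong to the same person you have to threshold that score, and the right cut-off depends on the recognition model (the SeetaFace documentation lists a recommended threshold per recognizer model), so treat the value below as a placeholder to tune on your own data. A minimal sketch:

// hedged sketch: turn a similarity score into a same-person decision
// SAME_PERSON_THRESHOLD is an assumed placeholder, not a value taken from this article;
// tune it for the face_recognizer model you actually use
const float SAME_PERSON_THRESHOLD = 0.62f;

bool is_same_person(float* feature1, float* feature2)
{
	return seeta_pacakge::compare(feature1, feature2) >= SAME_PERSON_THRESHOLD;
}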


 

If anything here is wrong, feel free to leave me a comment.

The wrapper itself works fine; I have used it in several projects. If you run into other problems, you can also leave me a comment.

 

 
