seetaface6 android build (Part 1)

Preface

On March 31, 2020, SeetaTech (中科视拓) announced the release of the SeetaFace6 face recognition algorithm.
SeetaFace6 covers the core face recognition capabilities: face detection, landmark localization and face recognition. It also adds liveness (anti-spoofing) detection, quality assessment and age/gender estimation, and, in line with real-world needs, releases mask detection together with a face recognition model for mask-wearing scenarios.
Compared with SeetaFace2, open-sourced the year before, SeetaFace6 adopts TenniS, the latest inference engine from the commercial edition; ResNet50 inference speed rises from SeetaFace2's 8 FPS on an i7 to 20 FPS. SeetaFace6 also greatly enlarges the face recognition training set, to over 100 million images.

1: Download
https://github.com/SeetaFace6Open/index
2: Build
In each module, go into android/jni and run ndk-build; build OpenRoleZoo, SeetaAuthorize and TenniS first.
You may want to edit Application.mk to suit your needs, because the default Android target version is far lower than you actually need (see the sketch below).
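For reference, a minimal Application.mk sketch of the kind of change meant here; the ABI list, platform level and STL choice are assumptions, so adjust them to your own targets:

# <module>/android/jni/Application.mk — example values only
APP_ABI      := arm64-v8a armeabi-v7a
APP_PLATFORM := android-21
APP_STL      := c++_static
APP_OPTIM    := release

Then build each module from its jni directory, for example: cd OpenRoleZoo/android/jni && ndk-build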
Model download address:
https://github.com/seetafaceengine/SeetaFace6
After the build you end up with a pile of library files.

3: OpenCV Android download
https://opencv.org/releases/
The current latest release, 4.6, ships a prebuilt libopencv_java4.so for all the common CPU architectures (usually enough; building it yourself requires CMake and is not covered here).
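For the Android side later on, one way to consume this prebuilt .so from an ndk-build project is a PREBUILT_SHARED_LIBRARY entry; the OPENCV_ANDROID_SDK variable and the sdk/native/libs path below are assumptions based on the standard OpenCV Android SDK layout:

# Android.mk fragment — OPENCV_ANDROID_SDK points to the unpacked SDK
include $(CLEAR_VARS)
LOCAL_MODULE    := opencv_java4
LOCAL_SRC_FILES := $(OPENCV_ANDROID_SDK)/sdk/native/libs/$(TARGET_ARCH_ABI)/libopencv_java4.so
include $(PREBUILT_SHARED_LIBRARY)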
4: The Windows version of the code is shown directly below (Android is more involved and needs a wrapper; see the following chapters)
The code is adapted from someone else's sample (I no longer remember whose) and the official demo.
It is built as x64 here, so remember to download the Windows libraries.

#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
//the 7 SeetaFace modules used below
#include <seeta/FaceDetector.h>
#include <seeta/FaceLandmarker.h>
#include <seeta/FaceRecognizer.h>
#include <seeta/GenderPredictor.h>
#include <seeta/AgePredictor.h>
#include <seeta/EyeStateDetector.h>
#include <seeta/FaceAntiSpoofing.h>

#ifdef _DEBUG

//debug libraries, 11 of them
#pragma comment(lib,"SeetaFaceDetector600d.lib") 
#pragma comment(lib,"SeetaFaceLandmarker600d.lib")

#pragma comment(lib,"SeetaFaceRecognizer610d.lib")
#pragma comment(lib,"SeetaGenderPredictor600d.lib") 
#pragma comment(lib,"SeetaAgePredictor600d.lib") 
#pragma comment(lib,"SeetaFaceAntiSpoofingX600d.lib") 
#pragma comment(lib,"SeetaEyeStateDetector200d.lib")

//these four are not used in this demo
#pragma comment(lib,"SeetaMaskDetector200d.lib")
#pragma comment(lib,"SeetaFaceTracking600d.lib") 
#pragma comment(lib,"SeetaPoseEstimation600d.lib")
#pragma comment(lib,"SeetaQualityAssessor300d.lib")

#pragma comment(lib,"opencv_world410d.lib")

#else
//release libraries, 11 of them
#pragma comment(lib,"SeetaFaceDetector600.lib") 
#pragma comment(lib,"SeetaFaceLandmarker600.lib")

#pragma comment(lib,"SeetaFaceRecognizer610.lib")
#pragma comment(lib,"SeetaGenderPredictor600.lib") 
#pragma comment(lib,"SeetaAgePredictor600.lib") 
#pragma comment(lib,"SeetaFaceAntiSpoofingX600.lib") 
#pragma comment(lib,"SeetaEyeStateDetector200.lib")

#pragma comment(lib,"SeetaMaskDetector200.lib")
#pragma comment(lib,"SeetaFaceTracking600.lib") 
#pragma comment(lib,"SeetaPoseEstimation600.lib")
#pragma comment(lib,"SeetaQualityAssessor300.lib")

#pragma comment(lib,"opencv_world410.lib")
#endif

using namespace seeta;
using namespace std;
using namespace cv;

void drawResult(Scalar color, std::vector<string> labels, int classId, float conf, int left, int top, int right, int bottom, Mat& frame);
//extract a face feature vector from an image
bool extract_feature(Mat img, const FaceDetector& FD, const FaceLandmarker& FL, const FaceRecognizer& FR, float* feature)
{
	SeetaImageData simg;
	simg.height = img.rows;
	simg.width = img.cols;
	simg.channels = img.channels();
	simg.data = img.data;
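	//SeetaImageData is just a view over the Mat's 8-bit BGR pixels (the layout imread/VideoCapture provide), so img must stay alive while simg is used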

	auto faces = FD.detect(simg);

	if (faces.size <= 0) {
		cout << "no face detected" << endl;
		return false;
	}

	SeetaPointF points[5];
	FL.mark(simg, faces.data[0].pos, points);

	FR.Extract(simg, points, feature);
	return true;
}

string get_eye_status(seeta::EyeStateDetector::EYE_STATE state)
{
	if (state == seeta::EyeStateDetector::EYE_CLOSE)
		return "CLOSE";
	else if (state == seeta::EyeStateDetector::EYE_OPEN)
		return "OPEN";
	else if (state == seeta::EyeStateDetector::EYE_RANDOM)
		return "UNCERTAIN";
	else
		return "UNCERTAIN";
}

string get_fas_status(seeta::FaceAntiSpoofing::Status status) {
	switch (status) {
	case seeta::FaceAntiSpoofing::REAL:
		return "real face";
	case seeta::FaceAntiSpoofing::SPOOF:
		return "spoof face";
	case seeta::FaceAntiSpoofing::FUZZY:
		return "uncertain";
	case seeta::FaceAntiSpoofing::DETECTING:
		return "checking";
	}
	return "uncertain";
}


int main()
{
	string ModelPath = "./model/";

	//1. initialize the face detection model
	ModelSetting FD_setting;
	FD_setting.append(ModelPath + "face_detector.csta");
	FD_setting.set_device(ModelSetting::CPU);
	FD_setting.set_id(0);
	FaceDetector FD(FD_setting);

	//2. initialize the facial landmark model
	ModelSetting PD_setting;
	PD_setting.append(ModelPath + "face_landmarker_pts5.csta");
	FaceLandmarker FL(PD_setting);

	//3. initialize the face recognition model
	ModelSetting fr_setting;
	fr_setting.append(ModelPath + "face_recognizer.csta");
	FaceRecognizer FR(fr_setting);

	//4. initialize the gender prediction model
	ModelSetting gb_setting(ModelPath + "gender_predictor.csta");
	GenderPredictor GP(gb_setting);

	//5. initialize the age prediction model
	ModelSetting ap_setting(ModelPath + "age_predictor.csta");
	AgePredictor AP(ap_setting);

	//6. initialize the eye state model
	ModelSetting setting;
	setting.append(ModelPath + "eye_state.csta");
	EyeStateDetector EBD(setting);

	//7. initialize the anti-spoofing (liveness) model
	ModelSetting anti_setting;
	anti_setting.append(ModelPath + "fas_first.csta");
	anti_setting.append(ModelPath + "fas_second.csta");
	FaceAntiSpoofing FAS(anti_setting);
	FAS.SetThreshold(0.3, 0.90);//set the thresholds; another commonly used pair is (0.7, 0.55)
	FAS.SetBoxThresh(0.9);
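	//the two SetThreshold values are the clarity and reality thresholds reported by GetPreFrameScore further down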


	//build the face database feature vectors: three images are registered here (model1.jpg, mask1.jpg, model3.jpg)
	vector<pair<string, shared_ptr<float> > > feature_db;
	//the features are float arrays, so give shared_ptr an array deleter
	shared_ptr<float> feature1(new float[FR.GetExtractFeatureSize()], std::default_delete<float[]>());
	Mat ldh = imread("./picture/model1.jpg");
	extract_feature(ldh, FD, FL, FR, feature1.get());
	feature_db.emplace_back(pair<string, shared_ptr<float>>("name1", feature1));

	shared_ptr<float> feature2(new float[FR.GetExtractFeatureSize()], std::default_delete<float[]>());
	Mat ldh2 = imread("./picture/mask1.jpg");
	extract_feature(ldh2, FD, FL, FR, feature2.get());
	feature_db.emplace_back(pair<string, shared_ptr<float>>("name2", feature2));

	shared_ptr<float> feature3(new float[FR.GetExtractFeatureSize()], std::default_delete<float[]>());
	Mat ldh3 = imread("./picture/model3.jpg");
	extract_feature(ldh3, FD, FL, FR, feature3.get());
	feature_db.emplace_back(pair<string, shared_ptr<float>>("name3", feature3));

	namedWindow("SeetaFace6", 0);//resizable window; the name must match the one used in imshow below

	Mat frame;
	//VideoCapture capture("F:/20201204-WY-LDH-cut.mp4");//use a video file instead of the camera for testing
	VideoCapture capture(0);//default camera
	VideoWriter writer;
	cv::resize(ldh, ldh, cv::Size(120, 160));
	cv::resize(ldh2, ldh2, cv::Size(120, 160));
//	cv::resize(ldh3, ldh3, cv::Size(120, 160));

	if (!capture.isOpened())
	{
		cout << "fail to open!" << endl;
		return -1;
	}

	while (true)
	{
		if (!capture.read(frame)) {
			cout << "can not read any frame" << endl;
			break;
		}
		//ImageData image = frame;
		SeetaImageData image;
		image.height = frame.rows;
		image.width = frame.cols;
		image.channels = frame.channels();
		image.data = frame.data;

		auto faces = FD.detect(image);
		cout << "faces.size:" << faces.size << endl;

		for (int i = 0; i < faces.size; i++)
		{
			vector<string> labels;
			Scalar color(0x00, 0xA0, 0x00);
			//---- face box ----
			auto face = faces.data[i].pos;

			//---- landmark detection ----
			vector<SeetaPointF> points(FL.number());
			FL.mark(image, face, points.data());

			//---- feature extraction ----
			unique_ptr<float[]> feature(new float[FR.GetExtractFeatureSize()]);
			FR.Extract(image, points.data(), feature.get());

			//match against the face database
			float threshold = 0.60;
			int64_t target_index = -1;
			float max_sim = 0;
			for (size_t index = 0; index < feature_db.size(); ++index) {
				auto& pair_name_feat = feature_db[index];
				float current_sim = FR.CalculateSimilarity(feature.get(), pair_name_feat.second.get());
				if (current_sim > max_sim) {
					max_sim = current_sim;
					target_index = index;
				}
			}
			if (max_sim > threshold)
				labels.push_back(feature_db[target_index].first + "(similar:" + to_string(max_sim * 100).substr(0, 5) + ")");
			else
				labels.push_back("not found person");
/*
			//---- gender ----
			GenderPredictor::GENDER gender;
			GP.PredictGenderWithCrop(image, points.data(), gender);
			string gender_str = (string("sex:") + (gender == GenderPredictor::GENDER::MALE ? "man" : "woman"));

			//---- age ----
			int age;
			AP.PredictAgeWithCrop(image, points.data(), age);
			labels.push_back(gender_str + string("  age:") + to_string(age));

			//---- eye state ----
			EyeStateDetector::EYE_STATE leftstate, rightstate;
			EBD.Detect(image, points.data(), leftstate, rightstate);
			labels.push_back(string("left eye:") + get_eye_status(leftstate) + string(",right eye:") + get_eye_status(rightstate));
*/
			//anti-spoofing (liveness) check
			auto status = FAS.Predict(image, face, points.data());//use PredictVideo for multi-frame video mode
			float clarity;
			float reality;

			FAS.GetPreFrameScore(&clarity, &reality);
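			//clarity/reality are the scores behind the decision above; they are fetched here but not drawn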
			labels.push_back(string("video check:") + get_fas_status(status));
			if (status == FaceAntiSpoofing::SPOOF)
				color = Scalar(0x00, 0x00, 0xB0);
			drawResult(color, labels, 0, 0.0f, face.x, face.y, face.x + face.width, face.y + face.height, frame);
		}
		Scalar title_color(0x00, 0x8C, 0xFF);
		//draw the registered face thumbnails at the bottom of the frame
		int space = 6;
		frame(cv::Rect(frame.cols / 2 - ldh.cols - space / 2 - space, frame.rows - ldh.rows - space - space, ldh.cols * 2 + space + 2 * space, ldh.rows + 2 * space)) = title_color;
		ldh.copyTo(frame(cv::Rect(frame.cols / 2 - ldh.cols - space / 2, frame.rows - ldh.rows - space, ldh.cols, ldh.rows)));
		ldh2.copyTo(frame(cv::Rect(frame.cols / 2 + space / 2, frame.rows - ldh2.rows - space, ldh2.cols, ldh2.rows)));



		//write the annotated frame to a video file
		if (!writer.isOpened()) {
			writer.open("D:/setaface.avi", VideoWriter::fourcc('M', 'J', 'P', 'G'), 30, cv::Size(frame.cols, frame.rows), true);
		}
		if (writer.isOpened()) {
			writer.write(frame);
		}

		//show the window
		imshow("SeetaFace6", frame);

		//press Esc to quit
		if (waitKey(1) == 27)
			break;
	}

	writer.release();
	capture.release();

	return 0;
}


The drawResult helper used above lives in a separate file:

#pragma once
#include <string>
#include <iostream>

#include <opencv2/opencv.hpp>
//#include <opencv2/freetype.hpp>

using namespace cv;
using namespace std;

//float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };
//
//float get_color(int c, int x, int max)
//{
//    float ratio = ((float)x / max) * 5;
//    int i = floor(ratio);
//    int j = ceil(ratio);
//    ratio -= i;
//    float r = (1 - ratio) * colors[i][c] + ratio * colors[j][c];
//    return r;
//}


void drawResult(Scalar color, std::vector<string> labels, int classId, float conf, int left, int top, int right, int bottom, Mat& frame)
{
	int fontHeight = FONT_HERSHEY_SIMPLEX; //note: this holds the OpenCV font face id (0), not a pixel height; line spacing comes from the paddings below
	int fontTotalHeight = fontHeight * labels.size();
	int thickness = -1;
	int linestyle = LineTypes::LINE_AA;
	int baseline = 0;

	int max_label_index = 0;
	int padding = 5;
	for (int i = 1; i < labels.size(); i++) {
		if (labels[i].length() > labels[max_label_index].length())
			max_label_index = i;
	}
	cv::Size text_size;
//	if (ft2) text_size = ft2->getTextSize(labels[max_label_index], fontHeight, thickness, &baseline);
//	else text_size = cv::getTextSize(labels[max_label_index], fontHeight, 1.0f, thickness = 0, &baseline);}

	string text1 = labels[max_label_index];
	text_size = cv::getTextSize(text1, fontHeight, 1.0f, 3, &baseline);//font face, scale 1.0, thickness 3

	fontTotalHeight += 10 * padding + 2 * labels.size();
	text_size.width += 2 * padding;

	cv::Point pt1, pt2;
	cv::Point pt_text_bg1, pt_text_bg2;
	cv::Point pt_text;

	//object (face) box
	pt1.x = left;
	pt1.y = top;
	pt2.x = right;
	pt2.y = bottom;

	//text background box
	pt_text_bg1.x = left;
	pt_text_bg1.y = top - fontTotalHeight;
	pt_text_bg2.x = std::max(left + text_size.width, right);
	pt_text_bg2.y = top;


	//text origin (bottom-left corner)
	pt_text.x = left + padding;
	pt_text.y = top - padding;

	static int rect_line_width = 2;//std::max(1.0f, show_img->rows * .002f);
	cv::rectangle(frame, pt1, pt2, color, rect_line_width, linestyle, 0);

	cv::rectangle(frame, pt_text_bg1, pt_text_bg2, color, rect_line_width, linestyle, 0);
	cv::rectangle(frame, pt_text_bg1, pt_text_bg2, color, cv::FILLED, linestyle, 0);

	static cv::Scalar text_color = CV_RGB(255, 255, 255);

	

	for (int i = labels.size() - 1; i >= 0; i--) {//Point2i(40, 150)
		putText(frame, labels[i], pt_text, FONT_HERSHEY_PLAIN, 1, Scalar(0, 255, 0), 1, 8, false);
	//	if (ft2)ft2->putText(frame, labels[i], pt_text, fontHeight + (i == 0 ? 5 : 0), text_color, thickness, linestyle, true);
	//	else putText(frame, labels[i], pt_text, fontHeight, 1.0f, text_color, thickness = 0, linestyle, true);
	//	putText(frame, labels[i], pt_text, fontHeight, 1.0f, text_color, thickness = 3, linestyle, true);
		pt_text.y -= (fontHeight + 15);//((labels.size()-1 -i) * fontHeight);
		//break;
	}

}

Directory configuration
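The runtime layout the demo expects can be read off the paths used in main(): a model folder with the eight .csta files and a picture folder with the registered face images, next to the executable (where the Seeta and OpenCV DLLs also need to end up; the exact DLL placement depends on your own project settings):

<working directory>/
    model/
        face_detector.csta
        face_landmarker_pts5.csta
        face_recognizer.csta
        gender_predictor.csta
        age_predictor.csta
        eye_state.csta
        fas_first.csta
        fas_second.csta
    picture/
        model1.jpg
        mask1.jpg
        model3.jpg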

5: Next chapter: seetaface6 android wrapper
