SCRFD paper: https://arxiv.org/abs/2105.04714
SCRFD project: https://github.com/deepinsight/insightface/tree/master/detection/scrfd
Official ncnn example: https://github.com/Tencent/ncnn/blob/master/examples/scrfd.cpp
Code
- To make later use easier, the algorithm is wrapped in a class.
- To speed up inference, target_size is reduced from the original 640 to 320.
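As a rough sense of what this change buys (numbers worked from the detect_scrfd code below): for a 1920×1080 input, scale = 320/1920 ≈ 0.167, so the image is resized to 320×180 and then padded up to 320×192, the next multiple of 32. That is exactly a quarter of the pixels the network would see at target_size = 640 (640×384), which is where the speedup comes from, presumably at some cost in recall on very small faces.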
scrfd.h
#ifndef _SCRFD_H_
#define _SCRFD_H_
#include <vector>
#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include "net.h"
struct FaceObject
{
cv::Rect_<float> rect;
float prob;
};
class SCRFD {
private:
int width, height; // width and height of the original input image
const int target_size = 320;
const float prob_threshold = 0.3f;
const float nms_threshold = 0.45f;
const float mean_vals[3] = { 127.5f, 127.5f, 127.5f };
const float norm_vals[3] = { 1 / 128.f, 1 / 128.f, 1 / 128.f };
ncnn::Net scrfd;
float intersection_area(const FaceObject& a, const FaceObject& b);
void qsort_descent_inplace(std::vector<FaceObject>& faceobjects);
void qsort_descent_inplace(std::vector<FaceObject>& faceobjects, int left, int right);
void nms_sorted_bboxes(const std::vector<FaceObject>& faceobjects, std::vector<int>& picked, float nms_threshold);
ncnn::Mat generate_anchors(int base_size, const ncnn::Mat& ratios, const ncnn::Mat& scales);
void generate_proposals(const ncnn::Mat& anchors, int feat_stride, const ncnn::Mat& score_blob, const ncnn::Mat& bbox_blob, float prob_threshold, std::vector<FaceObject>& faceobjects);
public:
SCRFD(const std::string& model_path); // constructor
~SCRFD();
int detect_scrfd(const cv::Mat& bgr, std::vector<FaceObject>& faceobjects);
};
#endif
scrfd.cpp
#include "scrfd.h"
SCRFD::SCRFD(const std::string& model_path) {
width = height = 0;
std::string param_file = model_path + "/scrfd_500m-opt2.param";
std::string bin_file = model_path + "/scrfd_500m-opt2.bin";
scrfd.load_param(param_file.data());
scrfd.load_model(bin_file.data());
}
SCRFD::~SCRFD() {
scrfd.clear();
}
float SCRFD::intersection_area(const FaceObject& a, const FaceObject& b) {
cv::Rect_<float> inter = a.rect & b.rect;
return inter.area();
}
void SCRFD::qsort_descent_inplace(std::vector<FaceObject>& faceobjects) {
if (faceobjects.empty())
return;
qsort_descent_inplace(faceobjects, 0, faceobjects.size() - 1);
}
void SCRFD::qsort_descent_inplace(std::vector<FaceObject>& faceobjects, int left, int right) {
int i = left;
int j = right;
float p = faceobjects[(left + right) / 2].prob;
while (i <= j)
{
while (faceobjects[i].prob > p)
i++;
while (faceobjects[j].prob < p)
j--;
if (i <= j)
{
// swap
std::swap(faceobjects[i], faceobjects[j]);
i++;
j--;
}
}
#pragma omp parallel sections
{
#pragma omp section
{
if (left < j) qsort_descent_inplace(faceobjects, left, j);
}
#pragma omp section
{
if (i < right) qsort_descent_inplace(faceobjects, i, right);
}
}
}
void SCRFD::nms_sorted_bboxes(const std::vector<FaceObject>& faceobjects, std::vector<int>& picked, float nms_threshold) {
picked.clear();
const int n = faceobjects.size();
std::vector<float> areas(n);
for (int i = 0; i < n; i++)
{
areas[i] = faceobjects[i].rect.area();
}
for (int i = 0; i < n; i++)
{
const FaceObject& a = faceobjects[i];
int keep = 1;
for (int j = 0; j < (int)picked.size(); j++)
{
const FaceObject& b = faceobjects[picked[j]];
// intersection over union
float inter_area = intersection_area(a, b);
float union_area = areas[i] + areas[picked[j]] - inter_area;
// float IoU = inter_area / union_area
if (inter_area / union_area > nms_threshold)
keep = 0;
}
if (keep)
picked.push_back(i);
}
}
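// A quick worked example of the IoU test above (illustrative numbers): two
// 100x100 boxes offset by 50 px horizontally intersect in a 50x100 region,
// so IoU = 5000 / (10000 + 10000 - 5000) = 1/3 < 0.45, and the
// lower-scoring box is kept as a separate face.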
ncnn::Mat SCRFD::generate_anchors(int base_size, const ncnn::Mat& ratios, const ncnn::Mat& scales) {
int num_ratio = ratios.w;
int num_scale = scales.w;
ncnn::Mat anchors;
anchors.create(4, num_ratio * num_scale);
const float cx = 0;
const float cy = 0;
for (int i = 0; i < num_ratio; i++)
{
float ar = ratios[i];
int r_w = round(base_size / sqrt(ar));
int r_h = round(r_w * ar); //round(base_size * sqrt(ar));
for (int j = 0; j < num_scale; j++)
{
float scale = scales[j];
float rs_w = r_w * scale;
float rs_h = r_h * scale;
float* anchor = anchors.row(i * num_scale + j);
anchor[0] = cx - rs_w * 0.5f;
anchor[1] = cy - rs_h * 0.5f;
anchor[2] = cx + rs_w * 0.5f;
anchor[3] = cy + rs_h * 0.5f;
}
}
return anchors;
}
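// Worked example (illustrative numbers): with base_size = 16, ratios = {1}
// and scales = {1, 2}, r_w = r_h = 16, producing two square anchors centered
// at the origin: (-8, -8, 8, 8) and (-16, -16, 16, 16). They are then
// shifted across the feature map by feat_stride inside generate_proposals().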
void SCRFD::generate_proposals(const ncnn::Mat& anchors, int feat_stride, const ncnn::Mat& score_blob, const ncnn::Mat& bbox_blob, float prob_threshold, std::vector<FaceObject>& faceobjects) {
int w = score_blob.w;
int h = score_blob.h;
// generate face proposal from bbox deltas and shifted anchors
const int num_anchors = anchors.h;
for (int q = 0; q < num_anchors; q++)
{
const float* anchor = anchors.row(q);
const ncnn::Mat score = score_blob.channel(q);
const ncnn::Mat bbox = bbox_blob.channel_range(q * 4, 4);
// shifted anchor
float anchor_y = anchor[1];
float anchor_w = anchor[2] - anchor[0];
float anchor_h = anchor[3] - anchor[1];
for (int i = 0; i < h; i++)
{
float anchor_x = anchor[0];
for (int j = 0; j < w; j++)
{
int index = i * w + j;
float prob = score[index];
if (prob >= prob_threshold)
{
// insightface/detection/scrfd/mmdet/models/dense_heads/scrfd_head.py _get_bboxes_single()
float dx = bbox.channel(0)[index] * feat_stride;
float dy = bbox.channel(1)[index] * feat_stride;
float dw = bbox.channel(2)[index] * feat_stride;
float dh = bbox.channel(3)[index] * feat_stride;
// insightface/detection/scrfd/mmdet/core/bbox/transforms.py distance2bbox()
float cx = anchor_x + anchor_w * 0.5f;
float cy = anchor_y + anchor_h * 0.5f;
float x0 = cx - dx;
float y0 = cy - dy;
float x1 = cx + dw;
float y1 = cy + dh;
FaceObject obj;
obj.rect.x = x0;
obj.rect.y = y0;
obj.rect.width = x1 - x0 + 1;
obj.rect.height = y1 - y0 + 1;
obj.prob = prob;
faceobjects.push_back(obj);
}
anchor_x += feat_stride;
}
anchor_y += feat_stride;
}
}
}
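// In short, the decoding above is distance2bbox from the SCRFD repo: the head
// predicts four distances (dx, dy, dw, dh) in feature-map units, which are
// scaled by feat_stride and measured from the anchor center (cx, cy), giving
// x0 = cx - dx, y0 = cy - dy, x1 = cx + dw, y1 = cy + dh.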
int SCRFD::detect_scrfd(const cv::Mat& bgr, std::vector<FaceObject>& faceobjects) {
width = bgr.cols;
height = bgr.rows;
// pad to multiple of 32
int w = width;
int h = height;
float scale = 1.f;
if (w > h)
{
scale = (float)target_size / w;
w = target_size;
h = h * scale;
}
else
{
scale = (float)target_size / h;
h = target_size;
w = w * scale;
}
ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, width, height, w, h);
// pad to target_size rectangle
int wpad = (w + 31) / 32 * 32 - w;
int hpad = (h + 31) / 32 * 32 - h;
ncnn::Mat in_pad;
ncnn::copy_make_border(in, in_pad, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, ncnn::BORDER_CONSTANT, 0.f);
in_pad.substract_mean_normalize(mean_vals, norm_vals);
ncnn::Extractor ex = scrfd.create_extractor();
ex.input("input.1", in_pad);
std::vector<FaceObject> faceproposals;
// stride 8
{
ncnn::Mat score_blob, bbox_blob;
ex.extract("412", score_blob);
ex.extract("415", bbox_blob);
const int base_size = 16;
const int feat_stride = 8;
ncnn::Mat ratios(1);
ratios[0] = 1.f;
ncnn::Mat scales(2);
scales[0] = 1.f;
scales[1] = 2.f;
ncnn::Mat anchors = generate_anchors(base_size, ratios, scales);
std::vector<FaceObject> faceobjects32;
generate_proposals(anchors, feat_stride, score_blob, bbox_blob, prob_threshold, faceobjects32);
faceproposals.insert(faceproposals.end(), faceobjects32.begin(), faceobjects32.end());
}
// stride 16
{
ncnn::Mat score_blob, bbox_blob;
ex.extract("474", score_blob);
ex.extract("477", bbox_blob);
const int base_size = 64;
const int feat_stride = 16;
ncnn::Mat ratios(1);
ratios[0] = 1.f;
ncnn::Mat scales(2);
scales[0] = 1.f;
scales[1] = 2.f;
ncnn::Mat anchors = generate_anchors(base_size, ratios, scales);
std::vector<FaceObject> faceobjects16;
generate_proposals(anchors, feat_stride, score_blob, bbox_blob, prob_threshold, faceobjects16);
faceproposals.insert(faceproposals.end(), faceobjects16.begin(), faceobjects16.end());
}
// stride 32
{
ncnn::Mat score_blob, bbox_blob;
ex.extract("536", score_blob);
ex.extract("539", bbox_blob);
const int base_size = 256;
const int feat_stride = 32;
ncnn::Mat ratios(1);
ratios[0] = 1.f;
ncnn::Mat scales(2);
scales[0] = 1.f;
scales[1] = 2.f;
ncnn::Mat anchors = generate_anchors(base_size, ratios, scales);
std::vector<FaceObject> faceobjects8;
generate_proposals(anchors, feat_stride, score_blob, bbox_blob, prob_threshold, faceobjects8);
faceproposals.insert(faceproposals.end(), faceobjects8.begin(), faceobjects8.end());
}
// sort all proposals by score from highest to lowest
qsort_descent_inplace(faceproposals);
// apply nms with nms_threshold
std::vector<int> picked;
nms_sorted_bboxes(faceproposals, picked, nms_threshold);
int face_count = picked.size();
faceobjects.resize(face_count);
for (int i = 0; i < face_count; i++)
{
faceobjects[i] = faceproposals[picked[i]];
// adjust offset to original unpadded
float x0 = (faceobjects[i].rect.x - (wpad / 2)) / scale;
float y0 = (faceobjects[i].rect.y - (hpad / 2)) / scale;
float x1 = (faceobjects[i].rect.x + faceobjects[i].rect.width - (wpad / 2)) / scale;
float y1 = (faceobjects[i].rect.y + faceobjects[i].rect.height - (hpad / 2)) / scale;
x0 = (std::max)((std::min)(x0, (float)width - 1), 0.f);
y0 = (std::max)((std::min)(y0, (float)height - 1), 0.f);
x1 = (std::max)((std::min)(x1, (float)width - 1), 0.f);
y1 = (std::max)((std::min)(y1, (float)height - 1), 0.f);
faceobjects[i].rect.x = x0;
faceobjects[i].rect.y = y0;
faceobjects[i].rect.width = x1 - x0;
faceobjects[i].rect.height = y1 - y0;
}
return 0;
}
main.cpp
#include "scrfd.h"
static void draw_faceobjects(const cv::Mat& bgr, const std::vector<FaceObject>& faceobjects)
{
cv::Mat image = bgr.clone();
for (size_t i = 0; i < faceobjects.size(); i++)
{
const FaceObject& obj = faceobjects[i];
fprintf(stderr, "%.5f at %.2f %.2f %.2f x %.2f\n", obj.prob,
obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
cv::rectangle(image, obj.rect, cv::Scalar(0, 255, 0));
char text[256];
snprintf(text, sizeof(text), "%.1f%%", obj.prob * 100);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
int x = obj.rect.x;
int y = obj.rect.y - label_size.height - baseLine;
if (y < 0)
y = 0;
if (x + label_size.width > image.cols)
x = image.cols - label_size.width;
cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
cv::Scalar(255, 255, 255), -1);
cv::putText(image, text, cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}
cv::imshow("image", image);
cv::waitKey(0); // comment this line out when processing video
}
int main()
{
// detect image
std::string modelPath = "./models";
std::string imagepath = "./samples/Snipaste_2022-05-20_11-01-12.png";
cv::Mat m = cv::imread(imagepath);
double t0 = (double)cv::getTickCount();
SCRFD* detector = new SCRFD(modelPath);
std::vector<FaceObject> faceobjects;
detector->detect_scrfd(m, faceobjects);
std::cout << "detect total time = " << ((double)cv::getTickCount() - t0) * 1000 / cv::getTickFrequency() << std::endl;
double t1 = (double)cv::getTickCount();
draw_faceobjects(m, faceobjects);
std::cout << "draw total time = " << ((double)cv::getTickCount() - t1) * 1000 / cv::getTickFrequency() << std::endl;
// detect video: when processing video, comment out the last line of draw_faceobjects
//std::string modelPath = "./models";
//std::string videoPath = "./samples/bili892156991.mp4";
//cv::VideoCapture mVideoCapture;
//mVideoCapture.open(videoPath);
//if (!mVideoCapture.isOpened()) {
//	std::cout << "fail to open!" << std::endl;
//}
//cv::Mat frame;
//mVideoCapture >> frame;
//SCRFD* detector = new SCRFD(modelPath);
//std::vector<FaceObject> faceobjects;
//while (!frame.empty()) {
// mVideoCapture >> frame;
//	if (frame.empty()) {
//		std::cout << "this frame is empty!" << std::endl;
//		break; // stop instead of running detection on an empty frame
//	}
// double t2 = (double)cv::getTickCount();
// detector->detect_scrfd(frame, faceobjects);
// printf("total %gms\n", ((double)cv::getTickCount() - t2) * 1000 / cv::getTickFrequency());
// printf("------------------\n");
// draw_faceobjects(frame, faceobjects);
// int q = cv::waitKey(10);
// if (q == 27) {
// break;
// }
//}
return 0;
}
The detection results are shown below.
It is very fast! The test image is 1920×1080 with multiple faces, and detection is still this quick. Impressive!
But there is a catch: the SCRFD_500M model does not output facial landmarks. Since the O-Net stage of MTCNN can output five facial keypoints, the two can be combined: modify the MTCNN class written earlier and add a function to it that takes SCRFD's output as input.
mtcnn.h
#ifndef _MTCNN_H_
#define _MTCNN_H_
#include "net.h"
#include <string>
struct Bbox
{
float score; // confidence
int x1;
int y1;
int x2;
int y2; // top-left and bottom-right corners of the box
float area; // area of the box
float ppoint[10]; // the five facial landmarks (x0..x4, then y0..y4)
float regreCoord[4]; // regression offsets for the four coordinates
};
class ONet {
private:
ncnn::Net Onet;
public:
ONet(const std::string& modelPath);
~ONet();
Bbox onetDetect(ncnn::Mat& img, int x, int y, int w, int h);
};
#endif
Implementation of the class, mtcnn.cpp
#include "mtcnn.h"
ONet::ONet(const std::string& modelPath) {
std::string param_file = modelPath + "/det3.param";
std::string bin_file = modelPath + "/det3.bin";
Onet.load_param(param_file.data());
Onet.load_model(bin_file.data());
}
ONet::~ONet() {
Onet.clear();
}
// img: ncnn::Mat holding one face crop taken from the original image
// x: x coordinate of the face rectangle in the original image
// y: y coordinate of the face rectangle in the original image
// w: width of the face rectangle in the original image
// h: height of the face rectangle in the original image
// Returns a Bbox for one face: rectangle position, confidence, five landmark coordinates, and the coordinate regression offsets (unused here)
Bbox ONet::onetDetect(ncnn::Mat& img, int x, int y, int w, int h) {
Bbox faceBbox;
const float mean_vals[3] = { 127.5f, 127.5f, 127.5f };
const float norm_vals[3] = { 1.0f / 127.5f, 1.0f / 127.5f, 1.0f / 127.5f };
img.substract_mean_normalize(mean_vals, norm_vals);
ncnn::Extractor ex = Onet.create_extractor();
ex.set_light_mode(true);
ex.input("data", img);
ncnn::Mat score, bbox, keyPoint;
ex.extract("prob1", score);
ex.extract("conv6-2", bbox);
ex.extract("conv6-3", keyPoint);
faceBbox.score = (float)score[1];
faceBbox.x1 = static_cast<int>(bbox[0] * w) + x;
faceBbox.y1 = static_cast<int>(bbox[1] * h) + y;
faceBbox.x2 = static_cast<int>(bbox[2] * w) + w + x;
faceBbox.y2 = static_cast<int>(bbox[3] * h) + h + y;
for (int num = 0; num < 5; num++) {
(faceBbox.ppoint)[num] = x + w * keyPoint[num];
(faceBbox.ppoint)[num + 5] = y + h * keyPoint[num + 5];
}
return faceBbox;
}
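To make the coordinate mapping above concrete, a quick worked example with illustrative numbers: for a face crop at (x, y) = (100, 50) with w = h = 80, a regression output bbox[0] = 0.05 gives x1 = int(0.05 × 80) + 100 = 104, and a landmark output keyPoint[0] = 0.3 gives an x coordinate of 100 + 80 × 0.3 = 124. In other words, O-Net's normalized outputs are scaled back up by the crop size and offset by the crop position.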
In the main function we then need to create an ONet instance and call the function above to obtain the five facial keypoints. The added code is shown below. After passing through this function, every face in the image has a Bbox containing the five landmark coordinates, so the rectangle and keypoints can be drawn directly on the original image; the original draw_faceobjects call is commented out.
// create the O-Net instance
ONet* detector_mtcnn = new ONet(modelPath);
double t1 = (double)cv::getTickCount();
// feed the face rectangles detected by SCRFD into MTCNN's O-Net
// what we already have is the vector faceobjects; each element's rect holds a face region
for (int i = 0; i < faceobjects.size(); i++) { // go through each detected face and pass it to O-Net
// cv::Mat imgCrop = m(faceobjects[i].rect); // cropping the region as a view of the original image does not work here!
cv::Mat faceROI_Image;
m(faceobjects[i].rect).copyTo(faceROI_Image); // this works: copyTo produces a contiguous buffer that from_pixels_resize can read
ncnn::Mat in = ncnn::Mat::from_pixels_resize(faceROI_Image.data, ncnn::Mat::PIXEL_BGR, faceROI_Image.cols, faceROI_Image.rows, 48, 48);
// pass to O-Net
Bbox faceBbox = detector_mtcnn->onetDetect(in, faceobjects[i].rect.x, faceobjects[i].rect.y, faceROI_Image.cols, faceROI_Image.rows);
std::cout << i << " " << faceBbox.x1 << " ";
std::cout << faceBbox.y1 << " " << faceBbox.x2 << " " << faceBbox.y2 << std::endl;
std::cout << i << " " << faceBbox.score << std::endl;
float sim = faceBbox.score;
// fine-tune the rectangle
if (sim > 0.1) { // stabilize
cv::Rect bdbox; // temporary box for the adjustment
bdbox.x = faceBbox.x1;
bdbox.y = faceBbox.y1;
bdbox.width = faceBbox.x2 - faceBbox.x1;
bdbox.height = faceBbox.y2 - faceBbox.y1;
bdbox = SquarePadding(bdbox, static_cast<int>(bdbox.height * -0.05));
bdbox = SquarePadding(bdbox, m.rows, m.cols, 1);
faceBbox.x1 = bdbox.x;
faceBbox.y1 = bdbox.y;
faceBbox.x2 = bdbox.x + bdbox.width;
faceBbox.y2 = bdbox.y + bdbox.height;
// print the adjusted box
std::cout << i << " " << faceBbox.x1 << " ";
std::cout << faceBbox.y1 << " " << faceBbox.x2 << " " << faceBbox.y2 << std::endl;
// for drawing
//cv::Rect rect;
//cv::Scalar color = cv::Scalar(0, 0, 255);
//rect.x = faceBbox.x1;
//rect.y = faceBbox.y1;
//rect.width = faceBbox.x2 - faceBbox.x1;
//rect.height = faceBbox.y2 - faceBbox.y1;
//rectangle(m, rect, color, 2);
//for (int j = 0; j < 5; j++)
//{
// cv::Point p = cv::Point(faceBbox.ppoint[j], faceBbox.ppoint[j + 5]);
// cv::circle(m, p, 2, color, 2);
//}
}
}
//cv::imshow("image", m);
std::cout << "Onet total time = " << ((double)cv::getTickCount() - t1) * 1000 / cv::getTickFrequency() << std::endl;
cv::waitKey(0);
There is one more key function here, SquarePadding, with two overloads; the code is below. As used in this pipeline, it first shrinks the O-Net box slightly (negative padding) and then squares it and clamps it inside the image. As for its original purpose, is it supposed to serve NCC matching in a tracking stage? That makes no sense: in the tracking stage, NCC matching has already been done before O-Net tracking runs. I don't get it!
static cv::Rect SquarePadding(cv::Rect facebox, int margin_rows, int margin_cols, bool max_b)
{
int c_x = facebox.x + facebox.width / 2;
int c_y = facebox.y + facebox.height / 2;
int large = 0;
if (max_b)
large = std::max(facebox.height, facebox.width) / 2;
else
large = std::min(facebox.height, facebox.width) / 2;
// note: rectNot misuses cv::Rect fields as corners, i.e. (x, y, width, height)
// actually stores (x1, y1, x2, y2) until the final cv::Rect is built below
cv::Rect rectNot(c_x - large, c_y - large, c_x + large, c_y + large);
rectNot.x = std::max(0, rectNot.x);
rectNot.y = std::max(0, rectNot.y);
rectNot.height = std::min(rectNot.height, margin_rows - 1); // clamp y2 to the image
rectNot.width = std::min(rectNot.width, margin_cols - 1); // clamp x2 to the image
// if clamping broke the square shape, retry with the shorter side
if (rectNot.height - rectNot.y != rectNot.width - rectNot.x)
return SquarePadding(cv::Rect(rectNot.x, rectNot.y, rectNot.width - rectNot.x, rectNot.height - rectNot.y), margin_rows, margin_cols, false);
return cv::Rect(rectNot.x, rectNot.y, rectNot.width - rectNot.x, rectNot.height - rectNot.y);
}
static cv::Rect SquarePadding(cv::Rect facebox, int padding)
{
// negative padding shrinks the box, positive padding grows it, keeping the center fixed
return cv::Rect(facebox.x - padding, facebox.y - padding, facebox.width + padding * 2, facebox.height + padding * 2);
}
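A worked example of the two calls as used above (illustrative numbers, with the image assumed large enough that clamping stays inactive): starting from an O-Net box at (200, 80) of size 100×120, SquarePadding(bdbox, int(120 × -0.05)) shrinks it by 6 px on each side to (206, 86, 88, 108); the second overload then grows the shorter side out from the same center, yielding the square (196, 86, 108, 108). The net effect is a slightly shrunken, squared, image-clamped face box.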
The main file combining SCRFD and ONet, supporting both image and video detection
#include <algorithm>
#include "scrfd.h"
#include "mtcnn.h"
static cv::Rect SquarePadding(cv::Rect facebox, int margin_rows, int margin_cols, bool max_b)
{
int c_x = facebox.x + facebox.width / 2;
int c_y = facebox.y + facebox.height / 2;
int large = 0;
if (max_b)
large = std::max(facebox.height, facebox.width) / 2;
else
large = std::min(facebox.height, facebox.width) / 2;
// rectNot stores (x1, y1, x2, y2) in the (x, y, width, height) fields
cv::Rect rectNot(c_x - large, c_y - large, c_x + large, c_y + large);
rectNot.x = std::max(0, rectNot.x);
rectNot.y = std::max(0, rectNot.y);
rectNot.height = std::min(rectNot.height, margin_rows - 1);
rectNot.width = std::min(rectNot.width, margin_cols - 1);
if (rectNot.height - rectNot.y != rectNot.width - rectNot.x)
return SquarePadding(cv::Rect(rectNot.x, rectNot.y, rectNot.width - rectNot.x, rectNot.height - rectNot.y), margin_rows, margin_cols, false);
return cv::Rect(rectNot.x, rectNot.y, rectNot.width - rectNot.x, rectNot.height - rectNot.y);
}
static cv::Rect SquarePadding(cv::Rect facebox, int padding)
{
return cv::Rect(facebox.x - padding, facebox.y - padding, facebox.width + padding * 2, facebox.height + padding * 2);
}
int main()
{
// detect image
std::string modelPath = "./models";
std::string imagepath = "./samples/Snipaste_2022-05-20_11-01-12.png";
cv::Mat m = cv::imread(imagepath);
SCRFD* detector = new SCRFD(modelPath);
ONet* detector_mtcnn = new ONet(modelPath);
double t0 = (double)cv::getTickCount();
std::vector<FaceObject> faceobjects;
detector->detect_scrfd(m, faceobjects);
std::cout << faceobjects.size() << std::endl;
std::vector<Bbox> result;
// feed the face rectangles detected by SCRFD into MTCNN's O-Net
// what we already have is the vector faceobjects; each element's rect holds a face region
for (int i = 0; i < faceobjects.size(); i++) { // go through each detected face and pass it to O-Net
cv::Mat faceROI_Image;
m(faceobjects[i].rect).copyTo(faceROI_Image); // copyTo produces a contiguous buffer for from_pixels_resize
ncnn::Mat in = ncnn::Mat::from_pixels_resize(faceROI_Image.data, ncnn::Mat::PIXEL_BGR, faceROI_Image.cols, faceROI_Image.rows, 48, 48);
// pass to O-Net
Bbox faceBbox = detector_mtcnn->onetDetect(in, faceobjects[i].rect.x, faceobjects[i].rect.y, faceROI_Image.cols, faceROI_Image.rows);
float sim = faceBbox.score;
// fine-tune the rectangle
if (sim > 0.1) { // stabilize
cv::Rect bdbox; // temporary box for the adjustment
bdbox.x = faceBbox.x1;
bdbox.y = faceBbox.y1;
bdbox.width = faceBbox.x2 - faceBbox.x1;
bdbox.height = faceBbox.y2 - faceBbox.y1;
bdbox = SquarePadding(bdbox, static_cast<int>(bdbox.height * -0.05));
bdbox = SquarePadding(bdbox, m.rows, m.cols, 1);
faceBbox.x1 = bdbox.x;
faceBbox.y1 = bdbox.y;
faceBbox.x2 = bdbox.x + bdbox.width;
faceBbox.y2 = bdbox.y + bdbox.height;
}
result.push_back(faceBbox);
}
std::cout << "detect total time = " << ((double)cv::getTickCount() - t0) * 1000 / cv::getTickFrequency() << std::endl;
// draw the results
for (int i = 0; i < result.size(); i++) {
cv::Rect rect;
cv::Scalar color = cv::Scalar(0, 0, 255);
rect.x = result[i].x1;
rect.y = result[i].y1;
rect.width = result[i].x2 - result[i].x1;
rect.height = result[i].y2 - result[i].y1;
rectangle(m, rect, color, 2);
for (int j = 0; j < 5; j++)
{
cv::Point p = cv::Point(result[i].ppoint[j], result[i].ppoint[j + 5]);
cv::circle(m, p, 2, color, 2);
}
}
cv::imshow("image", m);
cv::waitKey(0);
//-----------------------------------------------------------------------------
//=============================================================================
// detect video
//std::string modelPath = "./models";
//std::string videoPath = "./samples/6-14-2.mp4";
//cv::VideoCapture mVideoCapture;
//mVideoCapture.open(videoPath);
//if (!mVideoCapture.isOpened()) {
// std::cout << "fail to open!" << std::endl;
//}
//cv::Mat frame;
//SCRFD* detector = new SCRFD(modelPath);
//ONet* detector_mtcnn = new ONet(modelPath);
//std::vector<FaceObject> faceobjects;
//std::vector<Bbox> result;
//for(;;) {
//	result.clear(); // clear the previous frame's face results
// if (!mVideoCapture.read(frame))
// {
// break;
// }
// double t2 = (double)cv::getTickCount();
// detector->detect_scrfd(frame, faceobjects);
//	for (int i = 0; i < faceobjects.size(); i++) { // go through each detected face and pass it to O-Net
// cv::Mat faceROI_Image;
//		frame(faceobjects[i].rect).copyTo(faceROI_Image); // copyTo produces a contiguous buffer
// ncnn::Mat in = ncnn::Mat::from_pixels_resize(faceROI_Image.data, ncnn::Mat::PIXEL_BGR, faceROI_Image.cols, faceROI_Image.rows, 48, 48);
//		// pass to O-Net
// Bbox faceBbox = detector_mtcnn->onetDetect(in, faceobjects[i].rect.x, faceobjects[i].rect.y, faceROI_Image.cols, faceROI_Image.rows);
// float sim = faceBbox.score;
//		// fine-tune the rectangle
//		if (sim > 0.1) { // stabilize
//			cv::Rect bdbox; // temporary box for the adjustment
// bdbox.x = faceBbox.x1;
// bdbox.y = faceBbox.y1;
// bdbox.width = faceBbox.x2 - faceBbox.x1;
// bdbox.height = faceBbox.y2 - faceBbox.y1;
// bdbox = SquarePadding(bdbox, static_cast<int>(bdbox.height * -0.05));
// bdbox = SquarePadding(bdbox, frame.rows, frame.cols, 1);
// faceBbox.x1 = bdbox.x;
// faceBbox.y1 = bdbox.y;
// faceBbox.x2 = bdbox.x + bdbox.width;
// faceBbox.y2 = bdbox.y + bdbox.height;
// }
// result.push_back(faceBbox);
// }
// printf("total %gms\n", ((double)cv::getTickCount() - t2) * 1000 / cv::getTickFrequency());
// printf("------------------\n");
//	// draw the results
// for (int i = 0; i < result.size(); i++) {
// cv::Rect rect;
// cv::Scalar color = cv::Scalar(0, 0, 255);
// rect.x = result[i].x1;
// rect.y = result[i].y1;
// rect.width = result[i].x2 - result[i].x1;
// rect.height = result[i].y2 - result[i].y1;
// rectangle(frame, rect, color, 2);
// for (int j = 0; j < 5; j++)
// {
// cv::Point p = cv::Point(result[i].ppoint[j], result[i].ppoint[j + 5]);
// cv::circle(frame, p, 2, color, 2);
// }
// }
// cv::imshow("image", frame);
// //cv::waitKey(0);
// int q = cv::waitKey(10);
// if (q == 27) {
// break;
// }
//}
return 0;
}
Detection results
As you can see, multi-face detection speed still holds up just as well.