目录
先看效果
获取人脸特征值
说明
C#版Facefusion一共有如下5个步骤:
1、使用yoloface_8n.onnx进行人脸检测
2、使用2dfan4.onnx获取人脸关键点
3、使用arcface_w600k_r50.onnx获取人脸特征值
4、使用inswapper_128.onnx进行人脸交换
5、使用gfpgan_1.4.onnx进行人脸增强
本文分享使用arcface_w600k_r50.onnx实现C#版Facefusion第三步:获取人脸特征值。
顺便再看一下C++、Python代码的实现方式,可以对比学习。
回顾:
C#版Facefusion:让你的脸与世界融为一体!-01 人脸检测
C#版Facefusion:让你的脸与世界融为一体!-02 获取人脸关键点
模型信息
Inputs
-------------------------
name:input.1
tensor:Float[-1, 3, 112, 112]
---------------------------------------------------------------
Outputs
-------------------------
name:683
tensor:Float[1, 512]
---------------------------------------------------------------
代码
调用代码
using Newtonsoft.Json;
using OpenCvSharp;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Windows.Forms;
namespace FaceFusionSharp
{
    /// <summary>
    /// Demo form: picks a source/target image and extracts the source face's
    /// 512-d ArcFace embedding with hard-coded 5-point landmarks.
    /// </summary>
    public partial class Form3 : Form
    {
        public Form3()
        {
            InitializeComponent();
        }

        // Fixed: the filter previously listed "*.tiff" twice and omitted "*.tif".
        string fileFilter = "*.*|*.bmp;*.jpg;*.jpeg;*.tif;*.tiff;*.png";
        string source_path = "";
        string target_path = "";
        FaceEmbdding face_embedding;

        // Shows an open-file dialog restricted to image files.
        // Returns the chosen path, or null when the user cancels.
        private string PickImageFile()
        {
            using (OpenFileDialog ofd = new OpenFileDialog())
            {
                ofd.Filter = fileFilter;
                return ofd.ShowDialog() == DialogResult.OK ? ofd.FileName : null;
            }
        }

        // Select the source face image (designer-wired handler name kept as-is).
        private void button2_Click(object sender, EventArgs e)
        {
            string path = PickImageFile();
            if (path == null) return;
            pictureBox1.Image = null;
            source_path = path;
            pictureBox1.Image = new Bitmap(source_path);
        }

        // Select the target image (designer-wired handler name kept as-is).
        private void button3_Click(object sender, EventArgs e)
        {
            string path = PickImageFile();
            if (path == null) return;
            pictureBox2.Image = null;
            target_path = path;
            pictureBox2.Image = new Bitmap(target_path);
        }

        // Run embedding extraction on the source image and dump the vector as JSON.
        private void button1_Click(object sender, EventArgs e)
        {
            if (pictureBox1.Image == null || pictureBox2.Image == null)
            {
                return;
            }
            button1.Enabled = false;
            Application.DoEvents();
            Mat source_img = Cv2.ImRead(source_path);
            // Demo shortcut: landmarks are hard-coded for the bundled sample image
            // instead of being produced by the step-2 (2dfan4) detector.
            string face68landmarksStr = "[{\"X\":388.652832,\"Y\":492.494781},{\"X\":639.836365,\"Y\":493.765778},{\"X\":501.1994,\"Y\":655.1151},{\"X\":393.037354,\"Y\":730.373047},{\"X\":646.8359,\"Y\":733.28894}]";
            List<Point2f> face68landmarks = JsonConvert.DeserializeObject<List<Point2f>>(face68landmarksStr);
            List<float> source_face_embedding = face_embedding.detect(source_img, face68landmarks);
            textBox1.Text = JsonConvert.SerializeObject(source_face_embedding);
            button1.Enabled = true;
        }

        // Loads the model and the bundled sample images (designer-wired handler name kept).
        private void Form1_Load(object sender, EventArgs e)
        {
            face_embedding = new FaceEmbdding("model/arcface_w600k_r50.onnx");
            target_path = "images/target.jpg";
            source_path = "images/source.jpg";
            pictureBox1.Image = new Bitmap(source_path);
            pictureBox2.Image = new Bitmap(target_path);
        }
    }
}
FaceEmbdding.cs
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using OpenCvSharp;
using System.Collections.Generic;
using System.Linq;
namespace FaceFusionSharp
{
    /// <summary>
    /// Extracts a 512-d ArcFace face embedding (arcface_w600k_r50.onnx) from an
    /// image plus its 5-point face landmarks.
    /// </summary>
    internal class FaceEmbdding
    {
        float[] input_image;
        int input_height;
        int input_width;
        // ArcFace 112x112 alignment template (already scaled to crop size, unlike
        // the Python version's TEMPLATES.get(template) * crop_size).
        List<Point2f> normed_template = new List<Point2f>();
        SessionOptions options;
        InferenceSession onnx_session;

        /// <param name="modelpath">Path to the arcface_w600k_r50.onnx model file.</param>
        public FaceEmbdding(string modelpath)
        {
            input_height = 112;
            input_width = 112;
            options = new SessionOptions();
            options.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_INFO;
            options.AppendExecutionProvider_CPU(0); // run on CPU
            // Create the inference session from the local model file.
            onnx_session = new InferenceSession(modelpath, options);
            normed_template.Add(new Point2f(38.29459984f, 51.69630032f));
            normed_template.Add(new Point2f(73.53180016f, 51.50140016f));
            normed_template.Add(new Point2f(56.0252f, 71.73660032f));
            normed_template.Add(new Point2f(41.54929968f, 92.36549952f));
            normed_template.Add(new Point2f(70.72989952f, 92.20409968f));
        }

        // Aligns the face to the 112x112 template, scales pixels to [-1, 1]
        // and stores the planar tensor data in input_image.
        void preprocess(Mat srcimg, List<Point2f> face_landmark_5)
        {
            Mat crop_img = new Mat();
            Common.warp_face_by_face_landmark_5(srcimg, crop_img, face_landmark_5, normed_template, new Size(112, 112));
            Mat[] bgrChannels = Cv2.Split(crop_img);
            for (int c = 0; c < 3; c++)
            {
                // x / 127.5 - 1 maps [0, 255] -> [-1, 1]
                bgrChannels[c].ConvertTo(bgrChannels[c], MatType.CV_32FC1, 1 / 127.5, -1.0);
            }
            Cv2.Merge(bgrChannels, crop_img);
            foreach (Mat channel in bgrChannels)
            {
                channel.Dispose();
            }
            input_image = Common.ExtractMat(crop_img);
            crop_img.Dispose();
        }

        /// <summary>
        /// Runs the model and returns the raw (un-normalized) 512-d embedding.
        /// </summary>
        internal List<float> detect(Mat srcimg, List<Point2f> face_landmark_5)
        {
            preprocess(srcimg, face_landmark_5);
            Tensor<float> input_tensor = new DenseTensor<float>(input_image, new[] { 1, 3, input_height, input_width });
            List<NamedOnnxValue> input_container = new List<NamedOnnxValue>
            {
                NamedOnnxValue.CreateFromTensor("input.1", input_tensor)
            };
            // Fixed: dispose the native output values after copying (the original
            // leaked ONNX Runtime native memory on every call) and drop the stray
            // second semicolon on the return statement.
            using (var ort_outputs = onnx_session.Run(input_container))
            {
                float[] pdata = ort_outputs.First().AsTensor<float>().ToArray(); // shape is (1, 512)
                return pdata.ToList();
            }
        }
    }
}
C++代码
我们顺便看一下C++代码的实现,方便对比学习。
头文件
# ifndef FACERECOGNIZER
# define FACERECOGNIZER
#include <fstream>
#include <sstream>
//#include <cuda_provider_factory.h> /// uncomment if using CUDA acceleration
#include <onnxruntime_cxx_api.h>
#include"utils.h"
// Extracts a 512-d ArcFace face embedding from an image plus 5 face landmarks.
class FaceEmbdding
{
public:
	FaceEmbdding(std::string modelpath);
	// Returns the raw (un-normalized) embedding vector.
	std::vector<float> detect(cv::Mat srcimg, const std::vector<cv::Point2f> face_landmark_5);
private:
	// Warps/normalizes the face crop into input_image (planar float tensor).
	void preprocess(cv::Mat img, const std::vector<cv::Point2f> face_landmark_5);
	std::vector<float> input_image;      // NCHW tensor data for one inference
	int input_height;                    // read from the model's input shape
	int input_width;
	std::vector<cv::Point2f> normed_template; // ArcFace 112x112 alignment template
	Ort::Env env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "Face Feature Extract");
	Ort::Session *ort_session = nullptr; // NOTE(review): raw owning pointer, never deleted — leaks at destruction
	Ort::SessionOptions sessionOptions = Ort::SessionOptions();
	std::vector<char*> input_names;      // name strings owned by ORT's allocator
	std::vector<char*> output_names;
	std::vector<std::vector<int64_t>> input_node_dims; // >=1 outputs
	std::vector<std::vector<int64_t>> output_node_dims; // >=1 outputs
	Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
};
#endif
源文件
#include"facerecognizer.h"
using namespace cv;
using namespace std;
using namespace Ort;
// Loads the ONNX model and caches input/output names and shapes.
// Fixes vs. the original:
//  - two stray non-comment Chinese text fragments caused compile errors;
//  - every input/output name was pushed TWICE (once via the legacy API and once
//    via GetInputNameAllocated(...).get(), whose pointer dangles as soon as the
//    AllocatedStringPtr goes out of scope each loop iteration);
//  - the output-name loop mistakenly called GetInputNameAllocated.
FaceEmbdding::FaceEmbdding(string model_path)
{
	/// OrtStatus* status = OrtSessionOptionsAppendExecutionProvider_CUDA(sessionOptions, 0); /// uncomment if using CUDA acceleration
	sessionOptions.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);
	/// std::wstring widestr = std::wstring(model_path.begin(), model_path.end()); /// Windows variant
	/// ort_session = new Session(env, widestr.c_str(), sessionOptions);           /// Windows variant
	ort_session = new Session(env, model_path.c_str(), sessionOptions); /// Linux variant
	size_t numInputNodes = ort_session->GetInputCount();
	size_t numOutputNodes = ort_session->GetOutputCount();
	AllocatorWithDefaultOptions allocator;
	for (size_t i = 0; i < numInputNodes; i++)
	{
		// Legacy onnxruntime API: returns an allocator-owned C string that
		// remains valid, matching the vector<char*> members.
		input_names.push_back(ort_session->GetInputName(i, allocator));
		Ort::TypeInfo input_type_info = ort_session->GetInputTypeInfo(i);
		auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
		input_node_dims.push_back(input_tensor_info.GetShape());
	}
	for (size_t i = 0; i < numOutputNodes; i++)
	{
		output_names.push_back(ort_session->GetOutputName(i, allocator));
		Ort::TypeInfo output_type_info = ort_session->GetOutputTypeInfo(i);
		auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
		output_node_dims.push_back(output_tensor_info.GetShape());
	}
	this->input_height = input_node_dims[0][2];
	this->input_width = input_node_dims[0][3];
	// Template defined inline here, unlike the Python version's
	// normed_template = TEMPLATES.get(template) * crop_size.
	this->normed_template.emplace_back(Point2f(38.29459984, 51.69630032));
	this->normed_template.emplace_back(Point2f(73.53180016, 51.50140016));
	this->normed_template.emplace_back(Point2f(56.0252, 71.73660032));
	this->normed_template.emplace_back(Point2f(41.54929968, 92.36549952));
	this->normed_template.emplace_back(Point2f(70.72989952, 92.20409968));
}
// Aligns the face to the 112x112 ArcFace template, scales pixels to [-1, 1]
// and packs the result as a planar RGB tensor into input_image.
void FaceEmbdding::preprocess(Mat srcimg, const vector<Point2f> face_landmark_5)
{
	Mat crop_img;
	warp_face_by_face_landmark_5(srcimg, crop_img, face_landmark_5, this->normed_template, Size(112, 112));

	// Split into B, G, R planes and map [0, 255] -> [-1, 1] per plane.
	vector<cv::Mat> bgrChannels(3);
	split(crop_img, bgrChannels);
	for (auto &plane : bgrChannels)
	{
		plane.convertTo(plane, CV_32FC1, 1 / 127.5, -1.0);
	}

	// Copy planes in reversed (R, G, B) order to build the planar tensor.
	const int image_area = this->input_height * this->input_width;
	this->input_image.resize(3 * image_area);
	const size_t single_chn_size = image_area * sizeof(float);
	float *dst = this->input_image.data();
	memcpy(dst, (float *)bgrChannels[2].data, single_chn_size);
	memcpy(dst + image_area, (float *)bgrChannels[1].data, single_chn_size);
	memcpy(dst + image_area * 2, (float *)bgrChannels[0].data, single_chn_size);
}
// Runs inference and returns the raw (un-normalized) 512-d embedding.
vector<float> FaceEmbdding::detect(Mat srcimg, const vector<Point2f> face_landmark_5)
{
	this->preprocess(srcimg, face_landmark_5);
	std::vector<int64_t> input_img_shape = {1, 3, this->input_height, this->input_width};
	Value input_tensor_ = Value::CreateTensor<float>(memory_info_handler, this->input_image.data(), this->input_image.size(), input_img_shape.data(), input_img_shape.size());
	Ort::RunOptions runOptions;
	vector<Value> ort_outputs = this->ort_session->Run(runOptions, this->input_names.data(), &input_tensor_, 1, this->output_names.data(), output_names.size());
	// Output shape is (1, 512). Use GetElementCount() with a size_t instead of
	// the original's int64->int narrowing of GetShape()[1]; for a (1, N) output
	// both are equivalent, and the range constructor replaces the raw memcpy.
	const float *pdata = ort_outputs[0].GetTensorMutableData<float>();
	const size_t len_feature = ort_outputs[0].GetTensorTypeAndShapeInfo().GetElementCount();
	return vector<float>(pdata, pdata + len_feature);
}
Python代码
import cv2
import numpy as np
import onnxruntime
from utils import warp_face_by_face_landmark_5
class face_recognize:
    """Extracts a 512-d ArcFace embedding (arcface_w600k_r50.onnx) from a face image."""

    def __init__(self, modelpath):
        # Initialize the ONNX Runtime session on CPU.
        session_option = onnxruntime.SessionOptions()
        session_option.log_severity_level = 3  # only errors and above
        # Fixed: session_option was configured but never passed to the session,
        # so the log level setting had no effect.
        self.session = onnxruntime.InferenceSession(modelpath, sess_options=session_option, providers=['CPUExecutionProvider'])
        # self.session = onnxruntime.InferenceSession(modelpath, sess_options=session_option) ### opencv-dnn fails to read this onnx
        model_inputs = self.session.get_inputs()
        self.input_names = [model_inputs[i].name for i in range(len(model_inputs))]
        self.input_shape = model_inputs[0].shape
        self.input_height = int(self.input_shape[2])
        self.input_width = int(self.input_shape[3])

    def preprocess(self, srcimg, face_landmark_5):
        """Align the face to the 112x112 ArcFace template and build the NCHW tensor."""
        crop_img, _ = warp_face_by_face_landmark_5(srcimg, face_landmark_5, 'arcface_112_v2', (112, 112))
        crop_img = crop_img / 127.5 - 1  # map [0, 255] -> [-1, 1]
        # BGR -> RGB, HWC -> CHW
        crop_img = crop_img[:, :, ::-1].transpose(2, 0, 1).astype(np.float32)
        crop_img = np.expand_dims(crop_img, axis=0)
        return crop_img

    def detect(self, srcimg, face_landmark_5):
        """Return (raw_embedding, l2_normalized_embedding) as flat 512-d arrays."""
        input_tensor = self.preprocess(srcimg, face_landmark_5)
        # Perform inference on the image
        embedding = self.session.run(None, {self.input_names[0]: input_tensor})[0]
        embedding = embedding.ravel()  # flatten (1, 512) -> (512,)
        normed_embedding = embedding / np.linalg.norm(embedding)
        return embedding, normed_embedding
if __name__ == '__main__':
    imgpath = '5.jpg'
    # Fixed: honor the imgpath variable instead of repeating the literal.
    srcimg = cv2.imread(imgpath)
    # Hard-coded 5-point landmarks for the sample image (normally produced by
    # the step-2 landmark detector).
    face_landmark_5 = np.array([[568.2485, 398.9512],
                                [701.7346, 399.64795],
                                [634.2213, 482.92694],
                                [583.5656, 543.10187],
                                [684.52405, 543.125]])
    mynet = face_recognize('weights/arcface_w600k_r50.onnx')
    embedding, normed_embedding = mynet.detect(srcimg, face_landmark_5)
    print(embedding.shape, normed_embedding.shape)
其他
《C#版Facefusion:让你的脸与世界融为一体!》中的Demo程序已经在QQ群(758616458)中分享,需要的可以去QQ群文件中下载体验。
模型下载
https://docs.facefusion.io/introduction/license#models