AI Digital Human: Face Restoration Paste-Back and ONNX Inference Model Export

To detect and isolate faces with the YOLOv8n-face model, we need to load the YOLOv8 model and run inference. YOLOv8 is a deep-learning model designed for real-time object detection, and the YOLOv8n-face variant is trained specifically for face detection.

Below is the code that uses the YOLOv8n-face model for face detection, with OpenCvSharp handling the subsequent cropping, parsing, and paste-back:

using System;
using System.Collections.Generic;
using OpenCvSharp;
using OpenCvSharp.Dnn;

public class FaceProcessing3
{
private Net yoloNet;       // YOLOv8n-face detector
private Net resnet18Net;   // BiSeNet face-parsing model (ResNet-18 backbone)

public FaceProcessing3(string yoloModelPath, string resnet18ModelPath)
{
    // Load both ONNX models through OpenCV's DNN module
    yoloNet = CvDnn.ReadNetFromOnnx(yoloModelPath);
    resnet18Net = CvDnn.ReadNetFromOnnx(resnet18ModelPath);
}

public Rect[] DetectFaces(Mat image)
{
    // Resize to the 640x640 input expected by YOLOv8, scale pixels to [0, 1] and swap BGR -> RGB
    Mat inputBlob = CvDnn.BlobFromImage(image, 1 / 255.0, new Size(640, 640), new Scalar(0, 0, 0), true, false);
    yoloNet.SetInput(inputBlob);
    Mat detectionMat = yoloNet.Forward();

    // NOTE: this parsing assumes each output row is [cx, cy, w, h, confidence] with coordinates
    // normalized to [0, 1]. Depending on how the YOLOv8n-face model was exported, the raw output
    // may instead be a (channels x anchors) tensor in input-pixel coordinates, in which case a
    // transpose, a rescale to the original image size, and non-maximum suppression are needed.
    var faces = new List<Rect>();
    for (int i = 0; i < detectionMat.Rows; i++)
    {
        float confidence = detectionMat.At<float>(i, 4);
        if (confidence > 0.5)
        {
            // Convert the normalized centre/size box into a pixel-space rectangle
            float centerX = detectionMat.At<float>(i, 0) * image.Width;
            float centerY = detectionMat.At<float>(i, 1) * image.Height;
            float width = detectionMat.At<float>(i, 2) * image.Width;
            float height = detectionMat.At<float>(i, 3) * image.Height;
            int x = (int)(centerX - width / 2);
            int y = (int)(centerY - height / 2);
            faces.Add(new Rect(x, y, (int)width, (int)height));
        }
    }
    return faces.ToArray();
}

// Expand the detected face box into a square crop of side 2*s centred on the face.
// Note: the returned rectangle is not clamped to the image bounds.
public Rect GetCropBox(Rect faceBox, double expand, out int s)
{
    int x = faceBox.X;
    int y = faceBox.Y;
    int x1 = faceBox.Right;
    int y1 = faceBox.Bottom;
    int x_c = (x + x1) / 2;
    int y_c = (y + y1) / 2;
    int w = x1 - x;
    int h = y1 - y;
    s = (int)(Math.Max(w, h) / 2 * expand);
    return new Rect(x_c - s, y_c - s, 2 * s, 2 * s);
}

public Mat FaceSeg(Mat image)
{
    // The BiSeNet model exported below expects a 512x512 input with pixel values scaled to [0, 1];
    // depending on how it was trained/exported, ImageNet mean/std normalization may also be required.
    Mat blob = CvDnn.BlobFromImage(image, 1 / 255.0, new Size(512, 512), new Scalar(0, 0, 0), true, false);
    resnet18Net.SetInput(blob);
    Mat result = resnet18Net.Forward();

    // NOTE: the raw export below produces a 1 x 19 x 512 x 512 score map (one channel per parsing class).
    // This sketch assumes the ONNX was exported with an argmax/probability head so that the output is
    // already a single-channel face map; otherwise a per-pixel argmax over the 19 classes is needed here.
    Mat map = result.Reshape(1, result.Size(2));
    Mat segMask = new Mat();
    map.ConvertTo(segMask, MatType.CV_8UC1, 255.0);
    Cv2.Resize(segMask, segMask, new Size(image.Width, image.Height));
    return segMask;
}

// Paste a generated face back into the original frame.
// upperBoundaryRatio keeps only the lower part of the parsed mask (the talking region),
// and expand controls how much context around the face box is used for blending.
public Mat GetImage(Mat image, Mat face, Rect faceBox, double upperBoundaryRatio = 0.5, double expand = 1.2)
{
    // Note: the expanded crop box is not clamped, so faces near the border may need extra handling
    Rect cropBox = GetCropBox(faceBox, expand, out int s);
    Mat faceLarge = new Mat(image, cropBox);

    // Run face parsing on the enlarged crop, then cut out the region of the original face box
    Mat maskImage = FaceSeg(faceLarge);
    Rect maskSmallRect = new Rect(faceBox.X - cropBox.X, faceBox.Y - cropBox.Y, faceBox.Width, faceBox.Height);
    Mat maskSmall = new Mat(maskImage, maskSmallRect);

    Mat maskImageFull = Mat.Zeros(faceLarge.Size(), MatType.CV_8UC1);
    maskSmall.CopyTo(new Mat(maskImageFull, maskSmallRect));

    int width = maskImageFull.Width;
    int height = maskImageFull.Height;
    int topBoundary = (int)(height * upperBoundaryRatio);
    Mat modifiedMaskImage = Mat.Zeros(faceLarge.Size(), MatType.CV_8UC1);
    Mat maskPart = new Mat(maskImageFull, new Rect(0, topBoundary, width, height - topBoundary));
    maskPart.CopyTo(new Mat(modifiedMaskImage, new Rect(0, topBoundary, width, height - topBoundary)));

    int blurKernelSize = (int)(0.1 * faceLarge.Width / 2) * 2 + 1;
    Mat blurredMask = new Mat();
    Cv2.GaussianBlur(modifiedMaskImage, blurredMask, new Size(blurKernelSize, blurKernelSize), 0);

    // Paste the generated face into the enlarged crop
    face.CopyTo(new Mat(faceLarge, maskSmallRect));

    // Blend the crop back into the original frame, using the blurred mask as a per-pixel alpha
    Mat invMask = new Mat();
    Cv2.BitwiseNot(blurredMask, invMask);   // 255 - mask

    Mat maskF = new Mat(), invMaskF = new Mat(), faceF = new Mat(), roiF = new Mat();
    Cv2.CvtColor(blurredMask, maskF, ColorConversionCodes.GRAY2BGR);
    Cv2.CvtColor(invMask, invMaskF, ColorConversionCodes.GRAY2BGR);
    maskF.ConvertTo(maskF, MatType.CV_32FC3, 1.0 / 255.0);
    invMaskF.ConvertTo(invMaskF, MatType.CV_32FC3, 1.0 / 255.0);
    faceLarge.ConvertTo(faceF, MatType.CV_32FC3);

    Mat roi = new Mat(image, cropBox);
    roi.ConvertTo(roiF, MatType.CV_32FC3);

    Mat blended = new Mat();
    Cv2.Multiply(faceF, maskF, faceF);
    Cv2.Multiply(roiF, invMaskF, roiF);
    Cv2.Add(faceF, roiF, blended);
    blended.ConvertTo(blended, MatType.CV_8UC3);
    blended.CopyTo(roi);   // writes through the ROI view into the original image

    return image;
}

static void Main(string[] args)
{
    string yoloModelPath = "path_to_yolov8.onnx";
    string resnet18ModelPath = "path_to_resnet18.onnx";

    string imagePath = "path_to_image";
    string facePath = "path_to_face";
    Mat image = Cv2.ImRead(imagePath);
    Mat face = Cv2.ImRead(facePath);

    FaceProcessing3 processor = new FaceProcessing3(yoloModelPath, resnet18ModelPath);
    Rect[] faces = processor.DetectFaces(image);

    if (faces.Length > 0)
    {
        Rect faceBox = faces[0];
        Mat result = processor.GetImage(image, face, faceBox);
        Cv2.ImShow("Result", result);
        Cv2.WaitKey(0);
    }
    else
    {
        Console.WriteLine("No faces detected.");
    }
}

}
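
The FaceProcessing3 constructor above expects the YOLOv8n-face weights as an ONNX file. A minimal export sketch using the ultralytics Python package follows; the checkpoint name yolov8n-face.pt is a placeholder for whichever YOLOv8-face weights are available, and the layout of the exported output tensor should be checked against the parsing assumptions noted in DetectFaces.

from ultralytics import YOLO

# Load the face-detection checkpoint (placeholder path) and export it to ONNX
model = YOLO('yolov8n-face.pt')
model.export(format='onnx', imgsz=640, opset=12)  # writes an .onnx file next to the weights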

Model export and inference source code

import torch
# from model import BiSeNet
from musetalk.utils.face_parsing.model import BiSeNet

# Load the pretrained BiSeNet face-parsing model
model = BiSeNet(resnet_path='./models/face-parse-bisent/resnet18-5c106cde.pth', n_classes=19)
model.load_state_dict(torch.load('./models/face-parse-bisent/79999_iter.pth'))
model.eval()

# Create a dummy input matching the model's 512x512 RGB input
example_input = torch.randn(1, 3, 512, 512)

# Export the model to ONNX
torch.onnx.export(model, example_input, 'bisenet512.onnx',
                  input_names=['input'], output_names=['output'], opset_version=11)
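
To sanity-check the exported file before wiring it into the C# pipeline above, it can be loaded with onnxruntime and run on a dummy input. This is a minimal sketch: the file name bisenet512.onnx and the input/output names match the export call above, and the 1 x 19 x 512 x 512 output shape assumes the stock 19-class BiSeNet head.

import numpy as np
import onnxruntime as ort

# Load the exported model and run it on a random 512x512 input
session = ort.InferenceSession('bisenet512.onnx', providers=['CPUExecutionProvider'])
dummy = np.random.randn(1, 3, 512, 512).astype(np.float32)
outputs = session.run(['output'], {'input': dummy})
print(outputs[0].shape)  # expected: (1, 19, 512, 512), one score map per parsing class

# A per-pixel argmax turns the score maps into a label map; non-background labels form the face mask
labels = outputs[0].argmax(axis=1)[0]
mask = (labels > 0).astype(np.uint8) * 255
print(mask.shape, mask.dtype)  # (512, 512) uint8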
