Face alignment in C++ using the landmarks output by MTCNN

I previously open-sourced an iOS project that does simple face comparison (https://github.com/xuduo35/ncnn-mtcnn-facenet). It turned out not to do any face alignment, so today I looked into it and am filling that gap here.

1. FacePreprocess.h, a header provided by deepinsight (listed at the end of this post)

2. A piece of code adapted from https://github.com/deepinsight/insightface/issues/481

a. Initialize the alignment template points

        // The five reference landmarks of the 96x112 aligned-face template used by insightface.
        float v1[5][2] = {
            {30.2946f, 51.6963f},
            {65.5318f, 51.5014f},
            {48.0252f, 71.7366f},
            {33.5493f, 92.3655f},
            {62.7299f, 92.2041f}};

        cv::Mat src(5, 2, CV_32FC1);
        memcpy(src.data, v1, 2 * 5 * sizeof(float));

b. Compute the landmark coordinates for the MTCNN output box resized to a width of 96 (a sketch of how w and the face crop are obtained follows this snippet)

        // Detected landmarks of face i, expressed in the coordinates of its box resized to width 96.
        // The initial values are placeholders; the loop below overwrites them.
        float v2[5][2] =
            {{35.916126, 43.42934 },
             {86.2129  , 49.016266},
             {55.2765  , 71.023384},
             {32.937046, 87.26657 },
             {72.59505 , 91.47491 }};

        for (int j = 0; j < 5; j++)
        {
            // ppoint[0..4] are the x coordinates, ppoint[5..9] the y coordinates; w is the box width.
            v2[j][0] = (finalBbox[i].ppoint[j]     - finalBbox[i].x1) * (96.0 / w);
            v2[j][1] = (finalBbox[i].ppoint[j + 5] - finalBbox[i].y1) * (96.0 / w);
        }

        cv::Mat dst(5, 2, CV_32FC1);
        memcpy(dst.data, v2, 2 * 5 * sizeof(float));

        // Transform that maps the detected landmarks (dst) onto the template points (src).
        cv::Mat m = FacePreprocess::similarTransform(dst, src);

        // The top 2x3 block of m is the affine matrix expected by cv::warpAffine.
        cv::Mat map_matrix;
        cv::Rect map_matrix_r(0, 0, 3, 2);
        cv::Mat(m, map_matrix_r).copyTo(map_matrix);

        // std::cout << map_matrix << std::endl;
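The snippet above assumes that finalBbox, i, w, and the cropped face already exist in the surrounding MTCNN detection loop. For context, here is a minimal sketch of how they might be derived for face i; the Bbox fields (x1, y1, x2, y2, ppoint) follow the ncnn MTCNN struct used in the linked project, and "frame" is a hypothetical input image, so adjust to your own detector's types:

        // Hedged glue code (illustrative only): derive the box width/height and the face crop for face i.
        const Bbox &box = finalBbox[i];
        int w = box.x2 - box.x1;                         // box width, used in the 96.0/w scaling above
        int h = box.y2 - box.y1;                         // box height
        cv::Rect roi(box.x1, box.y1, w, h);
        roi &= cv::Rect(0, 0, frame.cols, frame.rows);   // keep the crop inside the frame
        cv::Mat croppedImage = frame(roi).clone();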

c. Align the face cropped with the MTCNN output box

        cv::Mat croppedImageAligned;
        cv::warpAffine(croppedImage, croppedImageAligned, map_matrix, cv::Size(w, h),
                       cv::INTER_CUBIC, cv::BORDER_CONSTANT, cv::Scalar::all(0));

d. Features can then be extracted from the aligned faces and used to compute a similarity score.
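As an illustration of this last step, here is a minimal sketch of cosine similarity between two embedding vectors; the embeddings themselves would come from whatever recognition network you use (facenet in the linked project), and the std::vector<float> representation is an assumption:

        #include <cmath>
        #include <vector>

        // Cosine similarity between two embedding vectors; returns a value in [-1, 1].
        float cosineSimilarity(const std::vector<float> &a, const std::vector<float> &b)
        {
            float dot = 0.f, normA = 0.f, normB = 0.f;
            for (size_t k = 0; k < a.size() && k < b.size(); k++) {
                dot   += a[k] * b[k];
                normA += a[k] * a[k];
                normB += b[k] * b[k];
            }
            if (normA == 0.f || normB == 0.f) return 0.f;   // guard against zero vectors
            return dot / (std::sqrt(normA) * std::sqrt(normB));
        }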

FacePreprocess.h: https://raw.githubusercontent.com/deepinsight/insightface/master/cpp-align/FacePreprocess.h

//
// Created by Jack Yu on 23/03/2018.
//

#ifndef FACE_DEMO_FACEPREPROCESS_H
#define FACE_DEMO_FACEPREPROCESS_H

#include<opencv2/opencv.hpp>


namespace FacePreprocess {

    cv::Mat meanAxis0(const cv::Mat &src)
    {
        int num = src.rows;
        int dim = src.cols;

        // x1 y1
        // x2 y2

        cv::Mat output(1,dim,CV_32F);
        for(int i = 0 ; i <  dim; i ++)
        {
            float sum = 0 ;
            for(int j = 0 ; j < num ; j++)
            {
                sum+=src.at<float>(j,i);
            }
            output.at<float>(0,i) = sum/num;
        }

        return output;
    }

    cv::Mat elementwiseMinus(const cv::Mat &A,const cv::Mat &B)
    {
        cv::Mat output(A.rows,A.cols,A.type());

        assert(B.cols == A.cols);
        if(B.cols == A.cols)
        {
            for(int i = 0 ; i <  A.rows; i ++)
            {
                for(int j = 0 ; j < B.cols; j++)
                {
                    output.at<float>(i,j) = A.at<float>(i,j) - B.at<float>(0,j);
                }
            }
        }
        return output;
    }


    cv::Mat varAxis0(const cv::Mat &src)
    {
        cv::Mat temp_ = elementwiseMinus(src, meanAxis0(src));
        cv::multiply(temp_ ,temp_ ,temp_ );
        return meanAxis0(temp_);

    }



    int MatrixRank(cv::Mat M)
    {
        cv::Mat w, u, vt;
        cv::SVD::compute(M, w, u, vt);
        cv::Mat1b nonZeroSingularValues = w > 0.0001;
        int rank = cv::countNonZero(nonZeroSingularValues);
        return rank;
    }

//    References
//    ----------
//    .. [1] "Least-squares estimation of transformation parameters between two
//    point patterns", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
//
//    Author: Jack Yu
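    // Estimates the similarity transform T (3x3, last row [0 0 1]) that maps the points in the
    // first argument onto the points in the second, i.e. the s, R, t minimizing
    //     sum_i || dst_i - (s * R * src_i + t) ||^2   (Umeyama's closed-form solution).
    // Both inputs are Nx2 CV_32FC1 matrices of corresponding points; the top 2x3 block of the
    // returned matrix can be passed directly to cv::warpAffine.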
    cv::Mat similarTransform(cv::Mat src,cv::Mat dst) {
        int num = src.rows;
        int dim = src.cols;
        cv::Mat src_mean = meanAxis0(src);
        cv::Mat dst_mean = meanAxis0(dst);
        cv::Mat src_demean = elementwiseMinus(src, src_mean);
        cv::Mat dst_demean = elementwiseMinus(dst, dst_mean);
        cv::Mat A = (dst_demean.t() * src_demean) / static_cast<float>(num);
        cv::Mat d(dim, 1, CV_32F);
        d.setTo(1.0f);
        if (cv::determinant(A) < 0) {
            d.at<float>(dim - 1, 0) = -1;

        }
        cv::Mat T = cv::Mat::eye(dim + 1, dim + 1, CV_32F);
        cv::Mat U, S, V;
        cv::SVD::compute(A, S, U, V);

        // Note: OpenCV's SVD output differs from scipy's.


        int rank = MatrixRank(A);
        if (rank == 0) {
            // Degenerate case: the input points do not span the space; no transform can be estimated.
            assert(rank == 0);
        } else if (rank == dim - 1) {
            if (cv::determinant(U) * cv::determinant(V) > 0) {
                T.rowRange(0, dim).colRange(0, dim) = U * V;
            } else {
//            s = d[dim - 1]
//            d[dim - 1] = -1
//            T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))
//            d[dim - 1] = s
                float s = d.at<float>(dim - 1, 0);
                d.at<float>(dim - 1, 0) = -1;
                cv::Mat diag_ = cv::Mat::diag(d);
                cv::Mat twp = diag_ * V; // np.dot(np.diag(d), V)
                T.rowRange(0, dim).colRange(0, dim) = U * twp;
                d.at<float>(dim - 1, 0) = s;
            }
        }
        else {
            cv::Mat diag_ = cv::Mat::diag(d);
            cv::Mat twp = diag_ * V.t(); // np.dot(np.diag(d), V.T)
            T.rowRange(0, dim).colRange(0, dim) = -U.t() * twp;
        }
        cv::Mat var_ = varAxis0(src_demean);
        float val = cv::sum(var_).val[0];
        cv::Mat res;
        cv::multiply(d,S,res);
        float scale =  1.0/val*cv::sum(res).val[0];
        T.rowRange(0, dim).colRange(0, dim) = - T.rowRange(0, dim).colRange(0, dim).t();
        cv::Mat  temp1 = T.rowRange(0, dim).colRange(0, dim); // T[:dim, :dim]
        cv::Mat  temp2 = src_mean.t(); //src_mean.T
        cv::Mat  temp3 = temp1*temp2; // np.dot(T[:dim, :dim], src_mean.T)
        cv::Mat temp4 = scale*temp3;
        T.rowRange(0, dim).colRange(dim, dim+1)=  -(temp4 - dst_mean.t()) ;
        T.rowRange(0, dim).colRange(0, dim) *= scale;
        return T;
    }


}
#endif //FACE_DEMO_FACEPREPROCESS_H
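As a quick sanity check of the header above, here is a minimal standalone sketch (assuming the header is saved as FacePreprocess.h next to the source file): it estimates the transform between two hand-picked point sets (the detected values are illustrative) and verifies that the 2x3 affine part maps the first set approximately onto the 96x112 template from step a.

    #include <cstdio>
    #include <opencv2/opencv.hpp>
    #include "FacePreprocess.h"

    int main()
    {
        // Detected landmarks (illustrative values) and the 96x112 template points.
        float detected[5][2] = {{35.9f, 43.4f}, {86.2f, 49.0f}, {55.3f, 71.0f},
                                {32.9f, 87.3f}, {72.6f, 91.5f}};
        float templ[5][2]    = {{30.2946f, 51.6963f}, {65.5318f, 51.5014f}, {48.0252f, 71.7366f},
                                {33.5493f, 92.3655f}, {62.7299f, 92.2041f}};

        cv::Mat dst(5, 2, CV_32FC1, detected);
        cv::Mat src(5, 2, CV_32FC1, templ);

        cv::Mat T = FacePreprocess::similarTransform(dst, src); // maps detected -> template
        cv::Mat affine = T(cv::Rect(0, 0, 3, 2)).clone();       // 2x3 part usable with cv::warpAffine

        // Apply the affine to each detected point and print it next to the corresponding template point.
        for (int j = 0; j < 5; j++) {
            float x = detected[j][0], y = detected[j][1];
            float u = affine.at<float>(0, 0) * x + affine.at<float>(0, 1) * y + affine.at<float>(0, 2);
            float v = affine.at<float>(1, 0) * x + affine.at<float>(1, 1) * y + affine.at<float>(1, 2);
            printf("mapped (%.2f, %.2f)  template (%.2f, %.2f)\n", u, v, templ[j][0], templ[j][1]);
        }
        return 0;
    }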

 
