Liveness detection with a binocular (dual) camera using traditional image-processing methods
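
The demo below pairs a visible-light (BGR) camera with an infrared camera, each handled in its own thread. The BGR stream is judged with a per-pixel inter-channel residual (caculate_residual_pixel): an attack image that is essentially grayscale gives near-identical B/G/R values and therefore a residual close to zero. The infrared stream is judged with a Sobel sharpness score restricted to the facial-landmark region (get_sobel_mean_with_landmark), the assumption being that photos or screens replayed in front of the IR camera look flat and low-contrast. Everything else in the file is the experiments used to pick the two thresholds: blur/sharpness metrics (Tenengrad, Laplacian variance, Brenner, SMD2, energy gradient, a no-reference JPEG score), a DFT high-frequency statistic, color-cast and brightness checks, and an evaluation on a blurred WFLW subset.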

#include <iostream>
#include <opencv2\opencv.hpp>
#include <io.h>


#include <mutex>
#include <thread>
#include <condition_variable>
#include "util.hpp"
#include "caffe_model.h"
#include "cascade\facedetect.h"

using namespace std;
using namespace cv;

//#define ZOOM 1.0
#define DEVICE -1

static int getFaceRect(FaceDetector &facedetector, Mat &dstImg, vector<Rect> &rects, float zoom)
{
    int widths = dstImg.cols;
    int heights = dstImg.rows;
    //vector<FaceInfomation> face = pd->detect(dstImg, id, 48);
    vector<Rect2d> faces;
    std::vector<std::vector<cv::Point2d> > points;
    facedetector.detect(dstImg, faces, points);
    //vector<Rect> face = face_detect.face_detect_frontal(dstImg, 48, 1.2f);
    //int status = dpUtil.getpts(path, landmark);
    int length = faces.size();
    if (length == 0)
    {
        //cout << "not found face ." << endl;
        return 0;
    }
    for (int j = 0; j < length; j++)
    {
        Rect rect = faces[j];

        int rect_w = rect.width;
        int rect_h = rect.height;
        rect.width = rect_w * zoom;
        rect.height = rect_h * zoom;
        rect.x = rect.x - (zoom - 1.0) * rect_w / 2.0;
        rect.y = rect.y - (zoom - 1.0) * rect_h / 2.0;
        if (rect.x < 0)
        {
            rect.x = 0;
        }
        if (rect.y < 0)
        {
            rect.y = 0;
        }
        if (rect.x + rect.width > widths)
        {
            rect.width = widths - rect.x;
            //rect.height = rect_h * rect.width / rect_w;
        }
        if (rect.y + rect.height > heights)
        {
            rect.height = heights - rect.y;
            //rect.width = rect_w * rect.height / rect_h;
        }
        rects.push_back(rect);
        // only one face position is actually needed
    }
    return length;
}

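// getFiles: recursively walk 'path' with the Windows _findfirst/_findnext API and collect
// every file whose name contains 'tail' (full path goes into 'files', bare name into 'filenames').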
void getFiles(string path, vector<string>& files, vector<string>& filenames, const string & tail)
{
    // file handle
    intptr_t hFile = 0;
    // file information
    struct _finddata_t fileinfo;
    string p;
    if ((hFile = _findfirst(p.assign(path).append("\\*").c_str(), &fileinfo)) != -1)
    {
        do
        {
            // if it is a directory, recurse into it;
            // otherwise add the file to the list
            if ((fileinfo.attrib &  _A_SUBDIR))
            {
                if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0)
                {
                    getFiles(p.assign(path).append("\\").append(fileinfo.name), files, filenames, tail);
                }

            }
            else
            {
                string filename = fileinfo.name;
                size_t found = filename.find(tail);
                if (found != string::npos)
                {
                    files.push_back(p.assign(path).append("\\").append(filename));
                    filenames.push_back(filename);
                }
            }
        } while (_findnext(hFile, &fileinfo) == 0);

        _findclose(hFile);
    }
}
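
// generate_bad_img_filter: create the "bad" (blurred) set by repeatedly box-blurring and
// Gaussian-blurring each image from the "good" folder and writing the result out.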
void generate_bad_img_filter() {
    string good_img = "D:\\picture_shape\\face\\face_good";
    string result_img = "D:\\picture_shape\\face\\face_bad\\";
    vector<string> files, file_names;
    getFiles(good_img, files, file_names, ".jpg");

    int length = files.size();
    for (size_t i = 0; i < length; i++)
    {
        cv::Mat img = imread(files[i]);

        cv::Mat dst;
        /*GaussianBlur(img, dst, Size(3, 3));*/
        blur(img, dst, Size(5, 5));  // box blur
        blur(dst, dst, Size(5, 5));  // box blur again
        blur(dst, dst, Size(3, 3));  // and once more with a smaller kernel
        GaussianBlur(dst, dst, Size(3, 3), 11, 11);
        imwrite(result_img + file_names[i], dst);
    }
}

cv::Mat fftd(cv::Mat img, bool backwards)
{
    if (img.channels() == 1)
    {
        cv::Mat planes[] = { cv::Mat_<float>(img), cv::Mat_<float>::zeros(img.size()) };
        cv::merge(planes, 2, img);
    }
    cv::dft(img, img, backwards ? (cv::DFT_INVERSE | cv::DFT_SCALE) : 0);

    return img;
}
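
// generate_bad_img_dft: DFT-based inspection of the good images. It computes the mean
// magnitude of the coefficients away from the borders of the unshifted spectrum (i.e.
// excluding the low-frequency corners), prints it, then shows the log-magnitude spectrum
// and the inverse transform for visual checking.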
void generate_bad_img_dft() {
    string good_img = "D:\\picture_shape\\face\\face_good";
    string result_img = "D:\\picture_shape\\face\\face_bad\\";
    vector<string> files, file_names;
    getFiles(good_img, files, file_names, ".jpg");

    int length = files.size();
    for (size_t i = 0; i < length; i++)
    {
        cv::Mat input = imread(files[i]);

        cvtColor(input, input, CV_BGR2GRAY);  // imread returns BGR, so convert with CV_BGR2GRAY

        int w = getOptimalDFTSize(input.cols);
        int h = getOptimalDFTSize(input.rows);  // optimal sizes for a fast DFT
        Mat padded;
        copyMakeBorder(input, padded, 0, h - input.rows, 0, w - input.cols, BORDER_CONSTANT, Scalar::all(0));  // zero-pad the image into padded
        Mat plane[] = { Mat_<float>(padded), Mat_<float>::zeros(padded.size()) };  // real and imaginary planes
        Mat complexIm;
        merge(plane, 2, complexIm);  // merge into a two-channel complex image

        dft(complexIm, complexIm);  // forward DFT, in place

        int rows = complexIm.rows;
        int cols = complexIm.cols;
        int offsetX = 10;
        int offsetY = 10;
        float mean = 0;
        int count = 0;
        for (size_t i = 0; i < rows; i++)
        {
            float *ptr = complexIm.ptr<float>(i);
            for (size_t j = 0; j < cols * 2; j = j+2)
            {
                //if (i > offset && j > offset && (i < (rows - offset) && j < (cols - offset))) {

                if ((i > offsetX && i < (rows -offsetX)) && (j > 2*offsetY && j < (2*cols-2*offsetY))){
                    /*ptr[j] = 0;
                    ptr[j + 1] = 0;*/

                    mean += sqrt(ptr[j] * ptr[j] + ptr[j + 1] * ptr[j + 1]);
                    count++;
                    //sqrt(ptr[j] * ptr[j] + ptr[j + 1] * ptr[j + 1]);
                }

                //if ((i < offsetX || i > (rows - offsetX)) || (j < 2 * offsetY || j > (2 * cols - 2 * offsetY))) {
                //    ptr[j] = 0;
                //    ptr[j + 1] = 0;
                //}
            }
        }
        std::cout << "mean is :" << mean / count << std::endl;
        
        split(complexIm, plane);  // split the complex result back into real/imaginary planes

        magnitude(plane[0], plane[1], plane[0]);  // magnitude image; channel 0 is the real part, channel 1 the imaginary part

        // the block below shifts the spectrum so the zero frequency sits at the center:
        // swap the top-left quadrant with the bottom-right, and the top-right with the bottom-left
        int cx = padded.cols / 2; int cy = padded.rows / 2;
        Mat temp;
        Mat part1(plane[0], Rect(0, 0, cx, cy));
        Mat part2(plane[0], Rect(cx, 0, cx, cy));
        Mat part3(plane[0], Rect(0, cy, cx, cy));
        Mat part4(plane[0], Rect(cx, cy, cx, cy));


        part1.copyTo(temp);
        part4.copyTo(part1);
        temp.copyTo(part4);

        part2.copyTo(temp);
        part3.copyTo(part2);
        temp.copyTo(part3);


        //*******************************************************************
        plane[0] += Scalar::all(1);  // the raw spectrum is hard to read, so take the log for a nicer visualization
        log(plane[0], plane[0]);
        normalize(plane[0], plane[0], 1, 0, CV_MINMAX);

        imshow("dft", plane[0]);
       

        //*******************************************************************
        //Mat _complexim(complexIm,Rect(padded.cols/4,padded.rows/4,padded.cols/2,padded.rows/2));
        //opyMakeBorder(_complexim,_complexim,padded.rows/4,padded.rows/4,padded.cols/4,padded.cols/4,BORDER_CONSTANT,Scalar::all(0.75));
        Mat _complexim;
        complexIm.copyTo(_complexim);  // copy the transform result and invert it, i.e. recover the original image
        Mat iDft[] = { Mat::zeros(plane[0].size(),CV_32F),Mat::zeros(plane[0].size(),CV_32F) };  // two float planes at the padded size
        idft(_complexim, _complexim);  // inverse DFT
        split(_complexim, iDft);  // the result is complex as well

        magnitude(iDft[0], iDft[1], iDft[0]);
        normalize(iDft[0], iDft[0], 1, 0, CV_MINMAX);  // normalize: float images display in [0,1]; above 1 shows white, below 0 black
        imshow("idft", iDft[0]);  // show the reconstructed image
        waitKey(0);

    }
}

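// getImageQualityAssessment: two classic focus/sharpness measures.
//  - sobel_mean: mean of the Sobel response with dx = 1, dy = 1 (the mixed derivative,
//    rather than the usual |Gx| + |Gy| of Tenengrad).
//  - lap_var: standard deviation of the Laplacian response. Note that in this cv::Laplacian
//    overload the fifth positional argument is 'scale', so passing BORDER_DEFAULT here sets
//    scale = 4 instead of the border mode; the thresholds derived later in this file
//    correspond to this call as written.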
void getImageQualityAssessment(cv::Mat img, float & sobel_mean, float & lap_var) {
    // the variance of the grayscale image alone could also be used as a criterion
    //cv::resize(img, img, Size(128, 128));
    // Tenengrad gradient
    cv::Mat imageSobel;
    Sobel(img, imageSobel, CV_16U, 1, 1);
    double meanValue = 0.0;
    meanValue = mean(imageSobel)[0];
    sobel_mean = meanValue;
    

    //double t0 = (double)cvGetTickCount();
    //Laplacian
    cv::Mat dst;
    //Laplace(f) = \dfrac{\partial^{2} f}{\partial x^{2}} + \dfrac{\partial^{2} f}{\partial y^{2}}
    int kernel_size = 3;
    int ddepth = CV_16U;
    Laplacian(img, dst, ddepth, kernel_size, BORDER_DEFAULT);
    //cv::imshow("laplac", dst);
    Mat tmp_m, tmp_sd;
    double m = 0, sd = 0;
    meanStdDev(dst, tmp_m, tmp_sd);
    m = tmp_m.at<double>(0, 0);
    sd = tmp_sd.at<double>(0, 0);
    //cout << "laplacian......... Mean: " << m << " ,laplacian StdDev: " << sd << endl;
    lap_var = sd;

    //double t1 = (double)cvGetTickCount();

   // std::cout << "Laplacian cost time is:" << (t1 - t0) / ((double)cvGetTickFrequency() * 1000) << " ms" << std::endl;


}

void get_quality_value() {
    // to reduce the influence of other regions, the nose area of the face could be used instead
    string good_img = "D:\\picture_shape\\face\\face_good";
    string bad_img = "D:\\picture_shape\\face\\face_bad";
    vector<string> files, file_names;
    getFiles(good_img, files, file_names, ".jpg");


    vector<string> files1, file_names1;
    getFiles(bad_img, files1, file_names1, ".jpg");

    int length = files.size();

    int min_good;
    int max_bad;
    for (size_t i = 0; i < length; i++)
    {
        cv::Mat img = imread(files[i]);
        float sobel_mean, lap_var;
        getImageQualityAssessment(img, sobel_mean, lap_var);

        

        cv::Mat img1 = imread(files1[i]);
        float sobel_mean1, lap_var1;
        getImageQualityAssessment(img1, sobel_mean1, lap_var1);

        std::cout << file_names[i]<<" sobel_mean " << sobel_mean << ":" << sobel_mean1 << " lap_var " << lap_var << ":" << lap_var1 << endl;


        std::cout << file_names[i] << " Tenengrad " << sobel_mean << ":" << sobel_mean1 << "     Laplace " << lap_var << ":" << lap_var1 << endl;


    }

}


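// get_img_dft_var: compares each good image with its blurred counterpart using three
// measures: Tenengrad, Laplacian std-dev, and the mean DFT magnitude away from the
// low-frequency corners of the unshifted spectrum, printed side by side.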
void get_img_dft_var() {
    string good_img = "D:\\picture_shape\\face\\face_good\\";
    string bad_img = "D:\\picture_shape\\face\\face_bad\\";
    vector<string> files, file_names;
    getFiles(good_img, files, file_names, ".jpg");


    vector<string> files1, file_names1;
    getFiles(bad_img, files1, file_names1, ".jpg");

    int length = files.size();
    for (size_t i = 0; i < length; i++)
    {
        cv::Mat input = imread(files[i]);

        cvtColor(input, input, CV_BGR2GRAY);  // imread returns BGR

        int w = getOptimalDFTSize(input.cols);
        int h = getOptimalDFTSize(input.rows);  // optimal sizes for a fast DFT
        Mat padded;
        copyMakeBorder(input, padded, 0, h - input.rows, 0, w - input.cols, BORDER_CONSTANT, Scalar::all(0));  // zero-pad into padded

        Mat plane[] = { Mat_<float>(padded), Mat_<float>::zeros(padded.size()) };  // real and imaginary planes
        Mat complexIm;
        merge(plane, 2, complexIm);  // merge into a two-channel complex image

        dft(complexIm, complexIm);  // forward DFT, in place
        int rows = complexIm.rows;
        int cols = complexIm.cols;
        int offsetX = 10;
        int offsetY = 10;
        float mean = 0;
        int count = 0;
        for (size_t i = 0; i < rows; i++)
        {
            float *ptr = complexIm.ptr<float>(i);
            for (size_t j = 0; j < cols * 2; j = j + 2)
            {
                if ((i > offsetX && i < (rows - offsetX)) && (j > 2 * offsetY && j < (2 * cols - 2 * offsetY))) {
                    mean += sqrt(ptr[j] * ptr[j] + ptr[j + 1] * ptr[j + 1]);
                    count++;
                }
            }
        }


        cv::Mat input1 = imread(files1[i]);

        cvtColor(input1, input1, CV_BGR2GRAY);  // imread returns BGR

        int w1 = getOptimalDFTSize(input1.cols);
        int h1 = getOptimalDFTSize(input1.rows);  // optimal sizes for a fast DFT
        Mat padded1;
        copyMakeBorder(input1, padded1, 0, h1 - input1.rows, 0, w1 - input1.cols, BORDER_CONSTANT, Scalar::all(0));  // zero-pad into padded1

        Mat plane1[] = { Mat_<float>(padded1), Mat_<float>::zeros(padded1.size()) };  // real and imaginary planes
        Mat complexIm1;
        merge(plane1, 2, complexIm1);  // merge into a two-channel complex image

        dft(complexIm1, complexIm1);  // forward DFT, in place
        int rows1 = complexIm1.rows;
        int cols1 = complexIm1.cols;
        int offsetX1 = 10;
        int offsetY1 = 10;
        float mean1 = 0;
        int count1 = 0;
        for (size_t i = 0; i < rows1; i++)
        {
            float *ptr = complexIm1.ptr<float>(i);
            for (size_t j = 0; j < cols1 * 2; j = j + 2)
            {
                if ((i > offsetX1 && i < (rows1 - offsetX1)) && (j > 2 * offsetY1 && j < (2 * cols1 - 2 * offsetY1))) {
                    mean1 += sqrt(ptr[j] * ptr[j] + ptr[j + 1] * ptr[j + 1]);
                    count1++;
                }
            }
        }


        cv::Mat img = imread(files[i]);
        float sobel_mean, lap_var;
        getImageQualityAssessment(img, sobel_mean, lap_var);



        cv::Mat img1 = imread(files1[i]);
        float sobel_mean1, lap_var1;
        getImageQualityAssessment(img1, sobel_mean1, lap_var1);

        //std::cout << file_names[i] << " sobel_mean " << sobel_mean << ":" << sobel_mean1 << " lap_var " << lap_var << ":" << lap_var1 << endl;

        //std::cout <<files[i]<<" mean "<< mean/count << "    mean1 is :" << mean1 / count1 << std::endl;

        std::cout << "Tenengrad  " << sobel_mean << " : " << sobel_mean1 << "     Laplace " << lap_var << " : " << lap_var1
            << "     FFT " << mean / count << " : " << mean1 / count1 << endl;


        //split(complexIm, plane);  // split the complex result back into planes
        //magnitude(plane[0], plane[1], plane[0]);  // magnitude image; channel 0 is real, channel 1 imaginary
        //int cx = padded.cols / 2; int cy = padded.rows / 2;  // shift the spectrum: swap diagonal quadrants
        //Mat temp;
        //Mat part1(plane[0], Rect(0, 0, cx, cy));
        //Mat part2(plane[0], Rect(cx, 0, cx, cy));
        //Mat part3(plane[0], Rect(0, cy, cx, cy));
        //Mat part4(plane[0], Rect(cx, cy, cx, cy));

        //part1.copyTo(temp);
        //part4.copyTo(part1);
        //temp.copyTo(part4);

        //part2.copyTo(temp);
        //part3.copyTo(part2);
        //temp.copyTo(part3);

        //*******************************************************************
        //plane[0] += Scalar::all(1);  // take the log of the spectrum for a nicer visualization
        //log(plane[0], plane[0]);
        //normalize(plane[0], plane[0], 1, 0, CV_MINMAX);
        //imshow("dft", plane[0]);
    }


}

void get_img_mean_var(cv::Mat img1, double &m, double &sd) {
    cv::Mat tmp_m, tmp_sd;
    meanStdDev(img1, tmp_m, tmp_sd);
    m = tmp_m.at<double>(0, 0);
    sd = tmp_sd.at<double>(0, 0);
    //cout << "Mean: " << m << " , StdDev: " << sd << endl;
}
double get_sobel_mean(cv::Mat face_img) {
    Mat edgeVertical, edgeHorizontal;
    double t = (double)getTickCount();
    Sobel(face_img, edgeHorizontal, CV_8U, 0, 1, 3, 1, 0);
    Sobel(face_img, edgeVertical, CV_8U, 1, 0, 3, 1, 0);

    Scalar s = cv::mean(edgeVertical);
    Scalar s1 = cv::mean(edgeHorizontal);
    //std::cout << " s: " << s[0] << " " << s[1] << " " << s[2] << std::endl;
    cv::imshow("gray", edgeVertical);
    //std::cout << " MeanValue:" << s[0] + s1[0] << std::endl;
    return s[0] + s1[0];
}

double get_laplica_var(cv::Mat img) {
    //Laplacian
    cv::Mat dst;
    //Laplace(f) = \dfrac{\partial^{2} f}{\partial x^{2}} + \dfrac{\partial^{2} f}{\partial y^{2}}
    int kernel_size = 3;
    int ddepth = CV_16U;
    Laplacian(img, dst, ddepth, kernel_size, BORDER_DEFAULT);
    //cv::imshow("laplac", dst);
    Mat tmp_m, tmp_sd;
    double m = 0, sd = 0;
    meanStdDev(dst, tmp_m, tmp_sd);
    m = tmp_m.at<double>(0, 0);
    sd = tmp_sd.at<double>(0, 0);
    //cout << "laplacian......... Mean: " << m << " ,laplacian StdDev: " << sd << endl;
    return m;
}

double get_tenengrad_mean(cv::Mat img) {
    Mat imageSobel;
    Sobel(img, imageSobel, CV_16U, 1, 1);
    return  mean(imageSobel)[0];
}


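// get_brenner: Brenner focus measure, the mean squared difference between pixels two
// positions apart along each row; sharper images give larger values.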
double get_brenner(Mat& image)
{
    double score = 0.0;
    for (int i = 0; i < image.rows; ++i)
    {
        uchar *ptr = image.ptr<uchar>(i);
        for (int j = 0; j < (image.cols - 2)*image.channels(); ++j)
        {
            score += (ptr[j + 2] - ptr[j])*(ptr[j + 2] - ptr[j]);
        }
    }
    return score / (image.rows*image.cols);
}
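
// get_smd2: SMD2 focus measure, the sum over all pixels of the product of the horizontal
// and vertical first differences.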
double get_smd2(Mat & image)
{
    unsigned long score = 0;
    for (int i = 0; i < image.rows - 1; ++i)
    {
        uchar* ptr = image.ptr<uchar>(i);
        uchar* ptr_1 = image.ptr<uchar>(i + 1);
        for (int j = 0; j < (image.cols - 1)*image.channels(); ++j)
        {
            score += (ptr[j] - ptr[j + 1]) * (ptr[j] - ptr_1[j]);
        }
    }
    return score;
}

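// get_energy: energy-gradient focus measure, the sum of squared horizontal and vertical
// first differences.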
double get_energy(Mat& image)
{
    unsigned long score = 0;
    for (int i = 0; i < image.rows - 1; ++i)
    {
        uchar* ptr = image.ptr<uchar>(i);
        uchar* ptr_1 = image.ptr<uchar>(i + 1);
        for (int j = 0; j < (image.cols - 1) * image.channels(); ++j)
        {
            score += pow(ptr[j + 1] - ptr[j], 2) + pow(ptr_1[j] - ptr[j], 2);
        }
    }
    return score;
}
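
// get_jpeg: no-reference blockiness/blur score in the style of the Wang-Sheikh-Bovik JPEG
// quality metric: B measures differences across 8x8 block boundaries, A the average
// in-block activity, Z the zero-crossing rate, combined as
// S = alpha + beta * B^gamma1 * A^gamma2 * Z^gamma3.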
double get_jpeg(Mat& image)
{
    //horizontal calculate
    auto b_h = 0.0;
    for (int i = 1; i < floor(image.rows / 8) - 1; ++i)
    {
        uchar* ptr = image.ptr<uchar>(8 * i);
        uchar* ptr_1 = image.ptr<uchar>(8 * i + 1);
        for (int j = 1; j < image.cols; ++j)
        {
            b_h += abs(ptr_1[j] - ptr[j]);
        }
    }
    b_h *= 1 / (image.cols*(floor(image.rows / 8) - 1));

    auto a_h = 0.0;
    for (int i = 1; i < image.rows - 1; ++i)
    {
        uchar* ptr = image.ptr<uchar>(i);
        uchar* ptr_1 = image.ptr<uchar>(i + 1);
        for (int j = 1; j < image.cols; ++j)
        {
            a_h += abs(ptr_1[j] - ptr[j]);
        }
    }
    a_h = (a_h * 8.0 / (image.cols * (image.rows - 1)) - b_h) / 7;

    auto z_h = 0.0;
    for (int i = 1; i < image.rows - 2; ++i)
    {
        uchar* ptr = image.ptr<uchar>(i);
        uchar* ptr_1 = image.ptr<uchar>(i + 1);
        for (int j = 1; j < image.cols; ++j)
        {
            z_h += (ptr_1[j] - ptr[j]) * (ptr_1[j + 1] - ptr[j]) > 0 ? 0 : 1;
        }
    }
    z_h *= 1.0 / (image.cols* (image.rows - 2));

    //vertical calculate
    auto b_v = 0.0;
    for (int i = 1; i < image.rows; ++i)
    {
        uchar* ptr = image.ptr<uchar>(i);
        for (int j = 1; j < floor(image.cols / 8) - 1; ++j)
        {
            b_v += abs(ptr[8 * j + 1] - ptr[8 * j]);
        }
    }
    b_v *= 1.0 / (image.rows*(floor(image.cols / 8) - 1));

    auto a_v = 0.0;
    for (int i = 1; i < image.rows; ++i)
    {
        uchar* ptr = image.ptr<uchar>(i);
        for (int j = 1; j < image.cols - 1; ++j)
        {
            a_v += abs(ptr[j + 1] - ptr[j]);
        }
    }
    a_v = (a_v * 8.0 / (image.rows * (image.cols - 1)) - b_v) / 7;

    auto z_v = 0.0;
    for (int i = 1; i < image.rows; ++i)
    {
        uchar* ptr = image.ptr<uchar>(i);
        for (int j = 1; j < image.cols - 2; ++j)
        {
            z_v += (ptr[j + 1] - ptr[j]) * (ptr[j + 2] - ptr[j + 1]) > 0 ? 0 : 1;
        }
    }
    z_v *= 1.0 / (image.rows* (image.cols - 2));

    
    auto B = (b_v + b_h) / 2;
    auto A = (a_h + a_v) / 2;
    auto Z = (z_h + z_v) / 2;
    auto alpha = -245.9, beta = 261.9, gamma1 = -0.024, gamma2 = 0.016, gamma3 = 0.0064;

    auto S = alpha + beta*pow(B, gamma1)*pow(A, gamma2)*pow(Z, gamma3);

    return S;
}


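// colorException: color-cast detection in CIE Lab space. da/db are the mean offsets of the
// a and b channels from neutral (128); cast is the ratio of that offset to the average
// spread of the a/b histograms, with cast > 1 flagged as a color cast.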
void colorException(Mat InputImg, float& cast, float& da, float& db)
{
    Mat LABimg;
    cvtColor(InputImg, LABimg, CV_BGR2Lab);
    // because OpenCV stores Lab as uint8, LABimg is remapped from the standard ranges 0~100, -127~127, -127~127 into 0~255, 0~255, 0~255
    float a = 0, b = 0;
    int HistA[256], HistB[256];
    for (int i = 0; i<256; i++)
    {
        HistA[i] = 0;
        HistB[i] = 0;
    }
    for (int i = 0; i < LABimg.rows; i++)
    {
        for (int j = 0; j < LABimg.cols; j++)
        {
            a += float(LABimg.at<cv::Vec3b>(i,j)[1] - 128);  // subtract 128 to map a/b back towards the standard CIE L*a*b* range
            b += float(LABimg.at<cv::Vec3b>(i,j)[2] - 128);
            int x = LABimg.at<cv::Vec3b>(i,j)[1];
            int y = LABimg.at<cv::Vec3b>(i,j)[2];
            HistA[x]++;
            HistB[y]++;
        }
    }
    da = a / float(LABimg.rows*LABimg.cols);
    db = b / float(LABimg.rows*LABimg.cols);
    float D = sqrt(da*da + db*db);
    float Ma = 0, Mb = 0;
    for (int i = 0; i<256; i++)
    {
        Ma += abs(i - 128 - da)*HistA[i];  // values range over -128~127 after the shift
        Mb += abs(i - 128 - db)*HistB[i];
    }
    Ma /= float((LABimg.rows*LABimg.cols));
    Mb /= float((LABimg.rows*LABimg.cols));
    float M = sqrt(Ma*Ma + Mb*Mb);
    float K = D / M;
    cast = K;
    printf("color cast index: %f\n", cast);
    if (cast > 1)
        printf("color cast detected\n");
    else
        printf("no color cast\n");
    return;
}

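// brightnessException: the same idea applied to brightness. da is the mean gray offset from
// 128; cast > 1 means the frame is abnormally bright (da > 0) or dark (da < 0).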
void brightnessException(Mat InputImg, float& cast, float& da)
{
    Mat GRAYimg;
    cvtColor(InputImg, GRAYimg, CV_BGR2GRAY);
    float a = 0;
    int Hist[256];
    for (int i = 0; i<256; i++)
        Hist[i] = 0;
    for (int i = 0; i < GRAYimg.rows ; i++)
    {
        for (int j = 0; j < GRAYimg.cols; j++)
        {
            a += float(GRAYimg.at<uchar>(i,j) - 128);  // treat 128 as the reference mean brightness
            int x = GRAYimg.at<uchar>(i,j);
            Hist[x]++;
        }
    }
    da = a / float(GRAYimg.rows*GRAYimg.cols);
    float D = abs(da);
    float Ma = 0;
    for (int i = 0; i < 256; i++)
    {
        Ma += abs(i - 128 - da)*Hist[i];
    }
    Ma /= float((GRAYimg.rows*GRAYimg.cols));
    float M = abs(Ma);
    float K = D / M;
    cast = K;
    printf("brightness index: %f\n", cast);
    if (cast > 1)
    {
        printf("brightness: ");
        if (da > 0)
            printf("too bright\n");
        else
            printf("too dark\n");
    }
    else
        printf("brightness: normal\n");
    return;
}

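// get_img_dft_mean: mean DFT magnitude outside a border of rows/6 x cols/6 around the
// unshifted spectrum, i.e. mostly higher-frequency content; blurred frames give smaller values.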
float get_img_dft_mean(cv::Mat input)
{
    cvtColor(input, input, CV_BGR2GRAY);  // camera/imread frames are BGR
    int w = getOptimalDFTSize(input.cols);
    int h = getOptimalDFTSize(input.rows);  // optimal sizes for a fast DFT
    Mat padded;
    copyMakeBorder(input, padded, 0, h - input.rows, 0, w - input.cols, BORDER_CONSTANT, Scalar::all(0));  // zero-pad into padded

    Mat plane[] = { Mat_<float>(padded), Mat_<float>::zeros(padded.size()) };  // real and imaginary planes
    Mat complexIm;
    merge(plane, 2, complexIm);  // merge into a two-channel complex image

    dft(complexIm, complexIm);  // forward DFT, in place
    int rows = complexIm.rows;
    int cols = complexIm.cols;
    int offsetX = rows/6;
    int offsetY = cols/6;
    float mean = 0;
    int count = 0;
    for (size_t i = 0; i < rows; i++)
    {
        float *ptr = complexIm.ptr<float>(i);
        for (size_t j = 0; j < cols * 2; j = j + 2)
        {
            if ((i > offsetX && i < (rows - offsetX)) && (j > 2 * offsetY && j < (2 * cols - 2 * offsetY))) {
                mean += sqrt(ptr[j] * ptr[j] + ptr[j + 1] * ptr[j + 1]);
                count++;
            }
        }
    }
    return mean/count;
}
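
// use_camera_check: live preview that overlays the Sobel mean, Laplacian std-dev and DFT
// mean on each webcam frame, for eyeballing reasonable thresholds. Press Esc to quit.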
void use_camera_check() {
    cv::VideoCapture cap(0);
    if (!cap.isOpened())
    {
        std::cout << "Unable to connect to camera" << std::endl;
        return;
    }
    std::ostringstream outtext;
    //main loop
    cv::Mat temp, gray;
    while (true)
    {
        // Grab a frame
        cap >> temp;
        cvtColor(temp, gray, CV_BGR2GRAY);
        float sobel_mean, lap_var;
        getImageQualityAssessment(gray, sobel_mean, lap_var);

        float dft_mean = get_img_dft_mean(temp);

        outtext << "sobel_mean: " << sobel_mean << "  laplacian_var: " << lap_var << "  dft_mean: " << dft_mean;
        cv::putText(temp, outtext.str(), cv::Point(20, 40), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255));
        outtext.str("");

        //press esc to end
        cv::imshow("gray", gray);
        cv::imshow("demo", temp);
        unsigned char key = cv::waitKey(1);
        if (key == 27)
        {
            break;
        }
    }
}


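// wflw_dataset_check: evaluate the three blur measures on a labelled WFLW-based set
// (label 1 = blurred, 0 = sharp), take the midpoint of the per-class means as the threshold
// and report the classification accuracy of each measure.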
void wflw_dataset_check() {
    string base = "D:\\WFLW\\wflw_blur_128\\augment_img_brue_train_128\\";

    string img_path = base;
    string text_path = base + "brue_128.txt";

    vector<string> file_data = util::readStringFromFileData(text_path);

    vector<float>sobel_mean_f, sobel_mean_t;
    vector<float>lap_var_f, lap_var_t;
    vector<float>dft_mean_f, dft_mean_t;

    int length = file_data.size();
    for (size_t i = 0; i < length; i++)
    {
        vector<string> item;
        util::splitString(file_data[i], item, " ");
        if (item.size() < 2) {
            std::cout << "item style error.  :" << file_data[i] << std::endl;
            continue;
        }
        string file_name = item[0];
        string blur = item[1];

        cv::Mat img = imread(img_path + file_name);

        float sobel_mean, lap_var, dft_mean;
        getImageQualityAssessment(img, sobel_mean, lap_var);
        dft_mean = get_img_dft_mean(img);

        if (blur == "1") {
            sobel_mean_f.push_back(sobel_mean);
            lap_var_f.push_back(lap_var);
            dft_mean_f.push_back(dft_mean);
        }
        else if(blur == "0"){
            sobel_mean_t.push_back(sobel_mean);
            lap_var_t.push_back(lap_var);
            dft_mean_t.push_back(dft_mean);
        }
    }

    std::cout << "f size:" << sobel_mean_f.size() << " t size:" << sobel_mean_t.size() << std::endl;

    int f = sobel_mean_f.size();
    int t = sobel_mean_t.size();

    float mean_sobel_f=0, mean_sobel_t=0;
    float mean_lap_f=0, mean_lap_t=0;
    float mean_dft_f=0, mean_dft_t=0;
    for (size_t i = 0; i < f; i++)
    {
        mean_sobel_f += sobel_mean_f[i];
        mean_lap_f += lap_var_f[i];
        mean_dft_f += dft_mean_f[i];
    }
    mean_sobel_f = mean_sobel_f / f;
    mean_lap_f = mean_lap_f / f;
    mean_dft_f = mean_dft_f / f;

    for (size_t i = 0; i < t; i++)
    {
        mean_sobel_t += sobel_mean_t[i];
        mean_lap_t += lap_var_t[i];
        mean_dft_t += dft_mean_t[i];
    }
    mean_sobel_t = mean_sobel_t / t;
    mean_lap_t = mean_lap_t / t;
    mean_dft_t = mean_dft_t / t;

    std::cout << " f-------- " << mean_sobel_f << "  " << mean_lap_f << "  " << mean_dft_f << std::endl;
    std::cout << " t-------- " << mean_sobel_t << "  " << mean_lap_t << "  " << mean_dft_t << std::endl;
    float threadhold_sobel = (mean_sobel_f + mean_sobel_t) / 2;  // 3.04483   recommended value: 2.5
    float threadhold_lap = (mean_lap_f + mean_lap_t) / 2;        // 104.823   recommended value: 95
    float threadhold_dft = (mean_dft_f + mean_dft_t) / 2;        // 189.142   recommended value: 175

    //accuracy:
    int all_img = f + t;

    int count_sobel = 0;
    int count_lap = 0;
    int count_dft = 0;
    for (size_t i = 0; i < f; i++)
    {
        if (sobel_mean_f[i] < threadhold_sobel) {
            count_sobel++;
        }
        if (lap_var_f[i] < threadhold_lap) {
            count_lap++;
        }
        if (dft_mean_f[i] < threadhold_dft) {
            count_dft++;
        }
    }
    for (size_t i = 0; i < t; i++)
    {
        if (sobel_mean_t[i] > threadhold_sobel) {
            count_sobel++;
        }
        if (lap_var_t[i] > threadhold_lap) {
            count_lap++;
        }
        if (dft_mean_t[i] > threadhold_dft) {
            count_dft++;
        }
    }
    std::cout << "threadhold:" << threadhold_sobel << "   " << threadhold_lap << "    " << threadhold_dft << std::endl;
    std::cout << "accuracy sobel_mean : lap_var : dft_mean = " << (float)count_sobel / all_img << " : " << (float)count_lap / all_img << " : " << (float)count_dft / all_img << std::endl;
}

void wflw_dataset_check_test() {
    string base = "D:\\WFLW\\wflw_blur_128\\augment_img_brue_test_128\\";

    string img_path = base;
    string text_path = base + "brue_128.txt";

    vector<string> file_data = util::readStringFromFileData(text_path);

    vector<float>sobel_mean_f, sobel_mean_t;
    vector<float>lap_var_f, lap_var_t;
    vector<float>dft_mean_f, dft_mean_t;



        int length = file_data.size();
        for (size_t i = 0; i < length; i++)
        {
            vector<string> item;
            util::splitString(file_data[i], item, " ");
            if (item.size() < 2) {
                std::cout << "item style error.  :" << file_data[i] << std::endl;
                continue;
            }
            string file_name = item[0];
            string blur = item[1];

            cv::Mat img = imread(img_path + file_name);

            float sobel_mean, lap_var, dft_mean;
            getImageQualityAssessment(img, sobel_mean, lap_var);
            dft_mean = get_img_dft_mean(img);

            if (blur == "1") {
                sobel_mean_f.push_back(sobel_mean);
                lap_var_f.push_back(lap_var);
                dft_mean_f.push_back(dft_mean);
            }
            else if (blur == "0") {
                sobel_mean_t.push_back(sobel_mean);
                lap_var_t.push_back(lap_var);
                dft_mean_t.push_back(dft_mean);
            }
        }

        std::cout << "f size:" << sobel_mean_f.size() << " t size:" << sobel_mean_t.size() << std::endl;

        int f = sobel_mean_f.size();
        int t = sobel_mean_t.size();

        for (size_t temp = 0; temp < 100; temp++)
        {
            float threadhold_sobel = 1.5f + (temp + 1) / 50.0f;
            float threadhold_lap = 60 + (temp + 1) / 2.0f;
            float threadhold_dft = 60 + (temp + 1) / 2.0f;
            
            //accuracy:
            int all_img = f + t;

            int count_sobel = 0;
            int count_lap = 0;
            int count_dft = 0;
            for (size_t i = 0; i < f; i++)
            {
                if (sobel_mean_f[i] < threadhold_sobel) {
                    count_sobel++;
                }
                if (lap_var_f[i] < threadhold_lap) {
                    count_lap++;
                }
                if (dft_mean_f[i] < threadhold_dft) {
                    count_dft++;
                }
            }
            for (size_t i = 0; i < t; i++)
            {
                if (sobel_mean_t[i] > threadhold_sobel) {
                    count_sobel++;
                }
                if (lap_var_t[i] > threadhold_lap) {
                    count_lap++;
                }
                if (dft_mean_t[i] > threadhold_dft) {
                    count_dft++;
                }
            }
            std::cout << "threadhold:" << threadhold_sobel << "   " << threadhold_lap << "    " << threadhold_dft << std::endl;
            std::cout << "accuracy sobel_mean : lap_var : dft_mean = " << (float)count_sobel / all_img << " : " << (float)count_lap / all_img << " : " << (float)count_dft / all_img << std::endl;
        }
}


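// caculate_residual_pixel: mean absolute difference between the B, G and R values of each
// pixel. The idea is that a genuine face seen by the color camera shows noticeable channel
// differences, while a grayscale print or IR-style image gives a value close to zero;
// compared against THREOD_BGR_GRAY below.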
float caculate_residual_pixel(cv::Mat face_img) {
    // empirical threshold: about 30 (see THREOD_BGR_GRAY)
    int channels = face_img.channels();
    int rows = face_img.rows;
    int cols = face_img.cols;
    cv::imshow("face_imgss", face_img);
    if (channels == 3) {
        int ele_cols = cols*channels;
        float sum = 0;
        for (size_t i = 0; i < rows; i++)
        {
            uchar * ptr = face_img.ptr<uchar>(i);
            for (size_t j = 0; j < ele_cols; j += 3)
            {
                sum += abs(ptr[j] - ptr[j + 1]) + abs(ptr[j] - ptr[j + 2]) + abs(ptr[j + 1] - ptr[j + 2]);
            }
        }
        return sum / (rows*cols);
    }
    

    return -1;
}

float caculate_img_mean(cv::Mat face_img) 
{
    return mean(face_img)[0];
}

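// get_sobel_mean_with_landmark: mean Sobel edge response restricted to a quadrilateral
// spanned by the 5 facial landmarks (eye midpoint, mouth midpoint and the slightly widened
// left/right cheek midpoints), so background and hair do not contribute to the sharpness score.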
double get_sobel_mean_with_landmark(cv::Mat img, vector<float> landmark) {
    
    Mat edgeVertical, edgeHorizontal;
    Sobel(img, edgeHorizontal, CV_8U, 0, 1, 3, 1, 0);
    Sobel(img, edgeVertical, CV_8U, 1, 0, 3, 1, 0);

    Point2f left_eye = Point2f(landmark[0], landmark[1]);
    Point2f right_eye = Point2f(landmark[2], landmark[3]);
    Point2f nose = Point2f(landmark[4], landmark[5]);
    Point2f left_mouth = Point2f(landmark[6], landmark[7]);
    Point2f right_mouth = Point2f(landmark[8], landmark[9]);

    Point2f mid_eye = (left_eye + right_eye) / 2;
    Point2f mid_mouth = (left_mouth + right_mouth) / 2;
    Point2f mid_left = (left_eye + left_mouth) / 2;
    Point2f mid_right = (right_eye + right_mouth) / 2;

    float mid_width = mid_right.x - mid_left.x;
    mid_left.x = mid_left.x  - mid_width /4.0f;
    mid_right.x = mid_right.x + mid_width / 4.0f;

    float k1 = (mid_eye.y - mid_left.y) / (mid_eye.x - mid_left.x);
    float k2 = (mid_eye.y - mid_right.y) / (mid_eye.x - mid_right.x);
    float k3 = (mid_left.y - mid_mouth.y) / (mid_left.x - mid_mouth.x);
    float k4 = (mid_right.y - mid_mouth.y) / (mid_right.x - mid_mouth.x);

    float b1 = mid_eye.y - k1 * mid_eye.x;
    float b2 = mid_eye.y - k2 * mid_eye.x;
    float b3 = mid_mouth.y - k3 * mid_mouth.x;
    float b4 = mid_mouth.y - k4 * mid_mouth.x;

    int rows = img.rows;
    int cols = img.cols;

    float sum_pix = 0;
    int count = 1;
    for (size_t i = 0; i < rows; i++)
    {
        uchar * ptr = edgeHorizontal.ptr<uchar>(i);
        uchar * ptr2 = edgeVertical.ptr<uchar>(i);
        for (size_t j = 0; j < cols; j++)
        {
            if ((k1 * j + b1 < i) && (k2 * j + b2 < i) && (k3 * j + b3 > i) && (k4 * j + b4 > i)) {
                count++;
                sum_pix += ptr[j];
                sum_pix += ptr2[j];
                ptr[j] = 0;
                ptr2[j] = 0;
            }
        }
    }
    cv::imshow("img_", edgeVertical);
    return sum_pix / count;
}

void bgr_gray_classify() {
    string gray_path = "E:\\work\\anti_spoofing\\data\\CASIA_ANTI\\data\\data_picture\\anti_print\\gray\\gray\\";
    string bgr_path = "E:\\work\\anti_spoofing\\data\\CASIA_ANTI\\data\\data_picture\\really\\real_faces\\celebra\\celebra1\\";

    string file_path[] = {gray_path, bgr_path};

    string model = "E:\\work\\face_detect\\cascadeCNN\\crasscnn\\caffemodel\\";
    FaceDetector facedetector(model, 48, 0.7, true, 0.7, 0);
    
    vector<vector<float>> value(2);

    for (size_t temp = 0; temp < 2; temp++)
    {
        vector<string> files, file_names;
        util::getFiles(file_path[temp], files, file_names, ".jpg");

        int length = file_names.size();
        cv::Mat img, gray;
        for (size_t i = 0; i < length; i++)
        {
            img = imread(files[i]);
            cvtColor(img, gray, CV_BGR2GRAY);
            vector<Rect> rects;
            int status = getFaceRect(facedetector, img, rects, 1.0);
            if (status > 0) {
                value[temp].push_back(caculate_residual_pixel(img(rects[0])));
            }
        }
        float value_mean = 0;
        int value_size = value[temp].size();
        for (size_t i = 0; i < value_size; i++)
        {
            value_mean += value[temp][i];
        }
        value_mean = value_mean / value_size;
        std::cout << " value mean is:" << value_mean << std::endl;
    }
}

void bgr_gray_classify_camera() {


    string model = "E:\\work\\face_detect\\cascadeCNN\\crasscnn\\caffemodel\\";
    FaceDetector facedetector(model, 48, 0.7, true, 0.7, 0);

    cv::VideoCapture cap(1);
    if (!cap.isOpened())
    {
        std::cout << "Unable to connect to camera" << std::endl;
        return;
    }
    std::ostringstream outtext;
    //main loop
    cv::Mat temp, gray;
    while (true)
    {
        // Grab a frame

        cap >> temp;
        cvtColor(temp, gray, CV_BGR2GRAY);

        vector<Rect> rects;
        int status = getFaceRect(facedetector, temp, rects, 1.0);
        if (status > 0) {
            std::cout << "residual :" << caculate_residual_pixel(temp(rects[0])) << std::endl;
        }

        imshow("img", temp);

        unsigned char key = cv::waitKey(1);
        if (key == 27)
        {
            break;
        }
    }
}

void bgr_infrare_classify_camera() {

    string model_facedet = "E:\\work\\face_detect\\cascadeCNN\\crasscnn\\caffemodel\\";
    FaceDetector facedetector(model_facedet, 48, 0.7, true, 0.7, 0);

    string base = "E:\\work\\face_alignment\\lanmark_5\\model\\umd_48_8\\";
    string model = base + "save_path_1\\_iter_700000.caffemodel";
    string proto = base + "o_net_half1_test.prototxt";
    CaffeModel caffe_model(proto, model, DEVICE);


    cv::VideoCapture cap(0);
    if (!cap.isOpened())
    {
        std::cout << "Unable to connect to camera" << std::endl;
        return;
    }
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 1080);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 720);
    std::ostringstream outtext;
    //main loop
    cv::Mat temp, gray;
    while (true)
    {
        // Grab a frame
        cap >> temp;
        cvtColor(temp, gray, CV_BGR2GRAY);

        vector<Rect> rects;
        int status = getFaceRect(facedetector, temp, rects, 1.0);
        if (status > 0) {
           // std::cout << "residual :" << caculate_residual_pixel(temp(rects[0])) << std::endl;  // does not work here

            //double mean, var;
            //get_img_mean_var(gray(rects[0]), mean, var);
            //std::cout << " img mean :" << mean<< "  "<< var<< std::endl;  // does not work
            //std::cout<< "img sobel mean :" <<get_sobel_mean(gray(rects[0])) << std::endl;   // works

            //std::cout << "get lap var:" << get_laplica_var(gray(rects[0])) << std::endl; // does not work
            //std::cout << " get_tenengrad_mean :" << get_tenengrad_mean(gray(rects[0])) << std::endl;

            //float x = 1.0, y = 0.0, z = 0.0;
            //colorException(temp(rects[0]), x, y, z);
           // brightnessException(temp(rects[0]), x, y);


           // std::cout << " get brenner :" << get_brenner(gray(rects[0])) << std::endl; // works
           // std::cout << " get smd: " << get_smd2(gray(rects[0])) << std::endl; // does not work
           // std::cout << " get energy :" << get_energy(gray(rects[0])) << std::endl; //
           // std::cout << " get jpeg :" << get_jpeg(gray(rects[0])) << std::endl;  // does not work

           

            cv::Mat resize_face;
            cv::resize(temp(rects[0]), resize_face, cv::Size(48, 48));

            vector<vector<float>> land_result = caffe_model.forward({ resize_face }, {"ip2", "ip3"});

            Point2f nose = Point2f((land_result[0][4] + 0.5)*rects[0].width, (land_result[0][5] + 0.5)*rects[0].height);

            circle(gray(rects[0]), nose, 2, cv::Scalar(255, 0, 0), 1);
            outtext << "Roll: " << land_result[1][2] * 90;
            cv::putText(temp, outtext.str(), cv::Point(50, 40), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 0, 255));
            outtext.str("");
            outtext << "Yaw: " << land_result[1][0] * 90;
            cv::putText(temp, outtext.str(), cv::Point(50, 60), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 0, 255));
            outtext.str("");
            outtext << "pitch: " << land_result[1][1] *90;
            cv::putText(temp, outtext.str(), cv::Point(50, 80), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 0, 255));
            outtext.str("");


            cv::Mat face_img;
            cv::resize(gray(rects[0]), face_img, cv::Size(224, 224));
            int length = land_result[0].size();
            for (size_t i = 0; i < 5; i++)
            {
                land_result[0][2*i] = (land_result[0][2*i] + 0.5)*224;
                land_result[0][2*i+1] = (land_result[0][2*i+1] + 0.5)*224;
                //Point2f nose = Point2f(land_result[0][2*i], land_result[0][2*i+1]);
                //circle(gray(rects[0]), nose, 2, cv::Scalar(255, 0, 0), 1);
            }
            std::cout << "get sobel mean with landmark :" << get_sobel_mean_with_landmark(face_img, land_result[0]) << std::endl;

            imshow("face_img", gray(rects[0]));
        }

        imshow("img", temp);

        unsigned char key = cv::waitKey(1);
        if (key == 27)
        {
            break;
        }
    }
}

void drawRect(Mat & img, Rect rect, int length)
{
    line(img, Point(rect.x, rect.y), Point(rect.x + length, rect.y), Scalar(0, 255, 0), 2);
    line(img, Point(rect.x, rect.y), Point(rect.x, rect.y + length), Scalar(0, 255, 0), 2);

    line(img, Point(rect.x + rect.width, rect.y), Point(rect.x + rect.width - length, rect.y), Scalar(0, 255, 0), 2);
    line(img, Point(rect.x + rect.width, rect.y), Point(rect.x + rect.width, rect.y + length), Scalar(0, 255, 0), 2);

    line(img, Point(rect.x, rect.y + rect.height), Point(rect.x + length, rect.y + rect.height), Scalar(0, 255, 0), 2);
    line(img, Point(rect.x, rect.y + rect.height), Point(rect.x, rect.y + rect.height - length), Scalar(0, 255, 0), 2);

    line(img, Point(rect.x + rect.width, rect.y + rect.height), Point(rect.x + rect.width - length, rect.y + rect.height), Scalar(0, 255, 0), 2);
    line(img, Point(rect.x + rect.width, rect.y + rect.height), Point(rect.x + rect.width, rect.y + rect.height - length), Scalar(0, 255, 0), 2);
}

//#define THREOD_INFRARE 10
#define THREOD_INFRARE 6.5
#define THREOD_BGR_GRAY 30
std::mutex mtx_syn;
std::condition_variable cv_syn;




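// oneInfrareFramePredict: infrared-camera thread. Detects the face, predicts 5 landmarks
// with the Caffe model, computes the landmark-restricted Sobel mean on the IR face crop and
// flags the face as real when it exceeds THREOD_INFRARE; replayed photos/screens are assumed
// to look flat (low edge energy) under IR.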
static void oneInfrareFramePredict()
{
    string model_facedet = "E:\\work\\face_detect\\cascadeCNN\\crasscnn\\caffemodel\\";
    FaceDetector facedetector(model_facedet, 48, 0.7, true, 0.7, 0);

    string base = "E:\\work\\face_alignment\\lanmark_5\\model\\umd_48_8\\";
    string model = base + "save_path_1\\_iter_700000.caffemodel";
    string proto = base + "o_net_half1_test.prototxt";
    CaffeModel caffe_model(proto, model, DEVICE);
    //cunpd *pd = new cunpd();
    //int pd_id;
    //init_model(pd, pd_id);

    //std::ostringstream outtext;

    Mat frame;
    Mat grey;
    int frame_num = 0;
    cv::VideoCapture capture(0);//"E:\\work\\anti_spoofing\\data\\CASIA_ANTI\\data\\really\\CASIA_ANTI\\test_release1\\1.avi"
    if (!capture.isOpened())
    {
        std::cout << "Unable to connect to camera" << std::endl;
        return;
    }
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 1080);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720);
    //capture.set(CV_CAP_PROP_FPS, 30);
    bool found_really_face = false;
    float real = 0;
    while (true)
    {
        capture >> frame;
        if (!frame.empty())
        {
            vector<Rect> rects;
            cvtColor(frame, grey, CV_BGR2GRAY);
            int status = getFaceRect(facedetector, frame, rects, 1.0f);
            if (status == 1)
            {
                cv::Mat resize_face;
                cv::resize(frame(rects[0]), resize_face, cv::Size(48, 48));

                vector<vector<float>> land_result = caffe_model.forward({ resize_face }, { "ip2", "ip3" });

                Point2f nose = Point2f((land_result[0][4] + 0.5)*rects[0].width, (land_result[0][5] + 0.5)*rects[0].height);

                /*outtext << "Roll: " << land_result[1][2] * 90;
                cv::putText(temp, outtext.str(), cv::Point(50, 40), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 0, 255));
                outtext.str("");
                outtext << "Yaw: " << land_result[1][0] * 90;
                cv::putText(temp, outtext.str(), cv::Point(50, 60), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 0, 255));
                outtext.str("");
                outtext << "pitch: " << land_result[1][1] * 90;
                cv::putText(temp, outtext.str(), cv::Point(50, 80), cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(0, 0, 255));
                outtext.str("");*/

                cv::Mat face_img;
                cv::resize(grey(rects[0]), face_img, cv::Size(224, 224));
                int length = land_result[0].size();
                for (size_t i = 0; i < 5; i++)
                {
                    land_result[0][2 * i] = (land_result[0][2 * i] + 0.5) * 224;
                    land_result[0][2 * i + 1] = (land_result[0][2 * i + 1] + 0.5) * 224;
                    //Point2f nose = Point2f(land_result[0][2*i], land_result[0][2*i+1]);
                    //circle(gray(rects[0]), nose, 2, cv::Scalar(255, 0, 0), 1);
                }
                //std::cout << "get sobel mean with landmark :" <<  << std::endl;
                real = get_sobel_mean_with_landmark(face_img, land_result[0]);

                std::cout << " real infrare:" << real << std::endl;
                //std::cout << "get light :" << caculate_img_mean(face_img) << std::endl;
            }
            else {
                real = 0;
                putText(frame, "NO FACE", Point(20, 200), 1, 2, Scalar(255, 0, 0));
            }
            string tex = " real:";
            found_really_face = real > THREOD_INFRARE ? true : false;
            putText(frame, tex + to_string(found_really_face), Point(20, 30 + 30), 1, 2, Scalar(0, 0, 255));
            int center_x = frame.cols / 2;
            int center_y = frame.rows / 2;
            Rect rect(center_x - 100, center_y - 100, 200, 200);
            drawRect(frame, rect, 20);
            cv::imshow("infrare", frame);
            cv::waitKey(10);
        }
    }
    capture.release();
}
//static void init_model(cunpd * & pd, int &pd_id)
//{
//    pd_id = pd->AddNpdModel(0);
//}
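
// oneBGRFramePredict: color-camera thread. Detects the face and checks the inter-channel
// residual of the face crop against THREOD_BGR_GRAY; it also signals the condition variable
// once its capture device has been configured so the IR thread can start.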
static void oneBGRFramePredict()
{
    string model = "E:\\work\\face_detect\\cascadeCNN\\crasscnn\\caffemodel\\";
    FaceDetector facedetector(model, 48, 0.7, true, 0.7, 0);

    Mat frame;
    Mat bound_frame;
    Mat gray;
    int frame_num = 0;
    cv::VideoCapture capture(1);
    if (!capture.isOpened())
    {
        std::cout << "Unable to connect to camera" << std::endl;
        return;
    }

    capture.set(CV_CAP_PROP_FRAME_WIDTH, 1088);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720);
    cv_syn.notify_one();

    float real = 0;
    while (true)
    {
        capture >> frame;

        if (frame.empty())
            break;

        vector<Rect> rects;
        cvtColor(frame, gray, CV_BGR2GRAY);
        int status = getFaceRect(facedetector, frame, rects, 0.8);
        if (status == 1)
        {
            real = caculate_residual_pixel(frame(rects[0]));
            std::cout << " real bgr:" << real << std::endl;
        }
        else {
            real = 0;
            putText(frame, "NO FACE", Point(20, 200), 1, 2, Scalar(255, 0, 0));
        }

        string tex = " real:";
        putText(frame, tex + to_string(real > THREOD_BGR_GRAY ? 1: 0), Point(20, 60), 1, 2, Scalar(0, 0, 255));

        int center_x = frame.cols / 2;
        int center_y = frame.rows / 2;
        Rect rect(center_x - 100, center_y - 100, 200, 200);
        drawRect(frame, rect, 20);
        imshow("frame", frame);
        waitKey(10);
    }
}


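// tradition_anti_spoofing: entry point that launches the BGR thread first, waits on the
// condition variable until that thread has opened its camera, then launches the IR thread
// and joins both; each stream displays its own real/fake verdict.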
void tradition_anti_spoofing() {
    
    std::thread bgr_camera(oneBGRFramePredict);

    unique_lock<mutex> lck(mtx_syn);
    cv_syn.wait(lck);
    lck.unlock();

    std::thread infrare_camera(oneInfrareFramePredict);

    bgr_camera.join();
    infrare_camera.join();
}

int main()
{
    //generate_bad_img_filter();
    //get_quality_value();

    //generate_bad_img_dft();


    //use_camera_check();
    
    //wflw_dataset_check();
    //wflw_dataset_check_test();
    
    //bgr_gray_classify();
    
    //bgr_gray_classify_camera();
    
    //bgr_infrare_classify_camera();
    
    tradition_anti_spoofing();

    //get_img_dft_var();

    system("pause");
    return 0;
}

 
