This post uses OpenCV's DNN module together with a Caffe model to perform face detection, first on a single image and then on a live camera stream.
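Both programs below follow the same three-step cv::dnn pipeline: load the Caffe model with readNetFromCaffe, turn the input image into a 300x300 mean-subtracted blob with blobFromImage, and run a forward pass to get the detections. A minimal sketch of just that pipeline (the file name face.jpg is only a placeholder; the two model files are assumed to sit in the working directory):

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

int main()
{
	// 1. Load the face detector (same model file names as in the programs below)
	cv::dnn::Net net = cv::dnn::readNetFromCaffe("deploy.prototxt",
		"res10_300x300_ssd_iter_140000.caffemodel");

	// 2. Convert the image to a 300x300 blob with the BGR mean subtracted
	cv::Mat img = cv::imread("face.jpg");	// placeholder image path
	cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(300, 300),
		cv::Scalar(104.0, 177.0, 123.0), false, false);

	// 3. Run the forward pass; the result is a [1, 1, N, 7] detection blob
	net.setInput(blob, "data");
	cv::Mat detection = net.forward("detection_out");
	return 0;
}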

Detecting faces in a single image:


#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

using namespace std;
using namespace cv;
using namespace cv::dnn;

// Input size, scale factor and mean values expected by the face detection network
const size_t inWidth = 300;
const size_t inHeight = 300;
const double inScaleFactor = 1.0;
const Scalar meanVal(104.0, 177.0, 123.0);

int main(int argc, char** argv)
{
	// Load image
	Mat img;
	// Use commandline
#if 0
	if (argc < 2)
	{
		cerr<< "please input "<< endl;
		cerr << "[Format]face_detector_img.exe image.jpg"<< endl;
		return -1;
	}
	img = imread(argv[1]);
#else
	// No command line: grab a single frame from the default camera instead
	cv::VideoCapture cap(0);	// open camera 0
	if (!cap.isOpened()) {
		cout << "Cannot open the camera" << endl;
		return 1;
	}
	cap >> img;
	//img = imread("11.jpg");	// or read the input from a file instead
#endif
	// Initialize Caffe network
	float min_confidence = 0.5;
	String modelConfiguration = "deploy.prototxt";
	String modelBinary = "res10_300x300_ssd_iter_140000.caffemodel";
	dnn::Net net = readNetFromCaffe(modelConfiguration, modelBinary);
	if (net.empty())
	{
		cerr << "Can't load network by using the following files: " << endl;
		cerr << "prototxt:   " << modelConfiguration << endl;
		cerr << "caffemodel: " << modelBinary << endl;
		cerr << "Models are available here:" << endl;
		cerr << "<OPENCV_SRC_DIR>/samples/dnn/face_detector" << endl;
		cerr << "or here:" << endl;
		cerr << "https://github.com/opencv/opencv/tree/master/samples/dnn/face_detector" << endl;
		exit(-1);
	}
	// Prepare blob
	Mat inputBlob = blobFromImage(img, inScaleFactor, Size(inWidth, inHeight), meanVal, false, false);
	net.setInput(inputBlob, "data");	// set the network input
	Mat detection = net.forward("detection_out");	// compute output

	// Measure inference time and derive an approximate frame rate
	vector<double> layersTimings;
	double freq = getTickFrequency() / 1000;	// ticks per millisecond
	double time = net.getPerfProfile(layersTimings) / freq;	// inference time in ms
	// The output blob has shape [1, 1, N, 7]; view it as an N x 7 matrix of detections
	Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

	ostringstream ss;
	ss << "FPS: " << 1000 / time << " ; time: " << time << " ms";
	putText(img, ss.str(), Point(20, 20), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 255));
	// Draw a box and a confidence label for every detection above the threshold
	float confidenceThreshold = min_confidence;
	for (int i = 0; i < detectionMat.rows; ++i)
	{
		// Keep only detections above the confidence threshold
		float confidence = detectionMat.at<float>(i, 2);
		if (confidence > confidenceThreshold)
		{
			// Columns 3..6 hold the normalized box corners (x1, y1, x2, y2)
			int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * img.cols);
			int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * img.rows);
			int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * img.cols);
			int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * img.rows);
			Rect object(xLeftBottom, yLeftBottom,
				xRightTop - xLeftBottom,
				yRightTop - yLeftBottom);
			rectangle(img, object, Scalar(255, 255, 255), 8);
			ss.str("");
			ss << confidence;
			String conf(ss.str());
			String label = "Face: " + conf;
			int baseLine = 0;
			Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 5, 10, &baseLine);
			rectangle(img, Rect(Point(xLeftBottom, yLeftBottom - labelSize.height),
				Size(labelSize.width, labelSize.height + baseLine)),
				Scalar(255, 255, 255), FILLED);	// FILLED replaces the old CV_FILLED macro
			putText(img, label, Point(xLeftBottom, yLeftBottom), 
				FONT_HERSHEY_SIMPLEX, 5, Scalar(0, 0, 0));
		}

	}

	namedWindow("Face Detection", WINDOW_NORMAL);
	imshow("Face Detection", img);
	waitKey(0);
	return 0;

}
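The drawing loop above relies on the layout of the detection_out blob: each of its N rows is a 7-float record [image_id, class_id, confidence, x1, y1, x2, y2], with the box corners normalized to [0, 1]. If you want to factor that parsing step out, a small helper along these lines would do (a sketch; parseDetections is hypothetical and not part of the original code):

#include <vector>
#include <opencv2/opencv.hpp>

// Hypothetical helper: turn the N x 7 detection matrix into pixel-space face boxes.
std::vector<cv::Rect> parseDetections(const cv::Mat& detectionMat,
	const cv::Size& imageSize, float threshold)
{
	std::vector<cv::Rect> faces;
	for (int i = 0; i < detectionMat.rows; ++i)
	{
		float confidence = detectionMat.at<float>(i, 2);
		if (confidence <= threshold)
			continue;
		// Columns 3..6 are the normalized corners (x1, y1, x2, y2)
		int x1 = static_cast<int>(detectionMat.at<float>(i, 3) * imageSize.width);
		int y1 = static_cast<int>(detectionMat.at<float>(i, 4) * imageSize.height);
		int x2 = static_cast<int>(detectionMat.at<float>(i, 5) * imageSize.width);
		int y2 = static_cast<int>(detectionMat.at<float>(i, 6) * imageSize.height);
		faces.push_back(cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)));
	}
	return faces;
}

The same helper would work unchanged in the camera version below, since both programs parse the detection matrix in exactly the same way.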

Detection from a live camera:

#include <iostream>  
#include <cstdlib>  
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>

using namespace cv;  
using namespace cv::dnn;  
using namespace std;  
const size_t inWidth = 300;  
const size_t inHeight = 300;  
const double inScaleFactor = 1.0;  
const Scalar meanVal(104.0, 177.0, 123.0);  

int main(int argc, char** argv)  
{  
    float min_confidence = 0.5;  
    String modelConfiguration = "deploy.prototxt";  
    String modelBinary = "res10_300x300_ssd_iter_140000.caffemodel";  
    //! [Initialize network]  
    dnn::Net net = readNetFromCaffe(modelConfiguration, modelBinary);  
    //! [Initialize network]  
    if (net.empty())  
    {  
        cerr << "Can't load network by using the following files: " << endl;  
        cerr << "prototxt:   " << modelConfiguration << endl;  
        cerr << "caffemodel: " << modelBinary << endl;  
        cerr << "Models are available here:" << endl;  
        cerr << "<OPENCV_SRC_DIR>/samples/dnn/face_detector" << endl;  
        cerr << "or here:" << endl;  
        cerr << "https://github.com/opencv/opencv/tree/master/samples/dnn/face_detector" << endl;  
        exit(-1);  
    }  

    VideoCapture cap(0);  
    if (!cap.isOpened())  
    {  
        cout << "Couldn't open camera : " << endl;  
        return -1;  
    }  
    for (;;)  
    {  
        Mat frame;  
        cap >> frame; // get a new frame from camera/video or read image  
        if (frame.empty())  
        {  
            waitKey();  
            break;  
        }  
        if (frame.channels() == 4)  
            cvtColor(frame, frame, COLOR_BGRA2BGR);  
        //! [Prepare blob]
        Mat inputBlob = blobFromImage(frame, inScaleFactor,
            Size(inWidth, inHeight), meanVal, false, false); // convert the frame to a 4D input blob
        //! [Prepare blob]

        //! [Set input blob]
        net.setInput(inputBlob, "data"); // set the network input
        //! [Set input blob]

        //! [Make forward pass]
        Mat detection = net.forward("detection_out"); // compute output
        //! [Make forward pass]

        vector<double> layersTimings;
        double freq = getTickFrequency() / 1000;   // ticks per millisecond
        double time = net.getPerfProfile(layersTimings) / freq;   // inference time in ms
        // The output blob has shape [1, 1, N, 7]; view it as an N x 7 matrix of detections
        Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

        ostringstream ss;
        ss << "FPS: " << 1000 / time << " ; time: " << time << " ms";
        putText(frame, ss.str(), Point(30, 80), FONT_HERSHEY_DUPLEX, 2.5, Scalar(0, 0, 255));

        float confidenceThreshold = min_confidence;  
        for (int i = 0; i < detectionMat.rows; i++)  
        {  
            // Keep only detections above the confidence threshold
            float confidence = detectionMat.at<float>(i, 2);
            if (confidence > confidenceThreshold)
            {
                // Columns 3..6 hold the normalized box corners (x1, y1, x2, y2)
                int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
                int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
                int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
                int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);
                Rect object(xLeftBottom, yLeftBottom,
                    xRightTop - xLeftBottom,
                    yRightTop - yLeftBottom);
                rectangle(frame, object, Scalar(0, 255, 0), 8);
                ss.str("");  
                ss << confidence;  
                String conf(ss.str());  
                String label = "Face: " + conf;  
                int baseLine = 0;  
                Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 1, 4, &baseLine);  
                rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom - labelSize.height),
                    Size(labelSize.width, labelSize.height + baseLine)),
                    Scalar(255, 255, 255), FILLED);	// FILLED replaces the old CV_FILLED macro
                putText(frame, label, Point(xLeftBottom, yLeftBottom),  
                    FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 0));  
            }  
        }  
        namedWindow("detections", WINDOW_NORMAL);
        imshow("detections", frame);
        if (waitKey(1) >= 0) break;
    }  
    return 0;  
} 
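If the per-frame time printed on screen is too high, OpenCV's DNN module also lets you choose a compute backend and target right after loading the network. A sketch of that option (whether OpenCL actually helps depends on your OpenCV build and hardware; on unsupported setups the module should fall back to the CPU path):

	// Optional, right after readNetFromCaffe(...): pick backend and target.
	// Available in recent OpenCV versions; the OpenCL target is an assumption
	// about your build and GPU rather than a requirement.
	net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
	net.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);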

 
