【Opencv测试用例(七)】


Chapter09

1、binaryDescriptors.cpp

结果

在这里插入图片描述

测试代码


#include <iostream>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/features2d.hpp>

static int test();

// Entry point: run the ORB binary-descriptor matching demo, then wait
// for a key press so the console window stays open (Windows-specific).
int main()
{
	(void)test(); // demo return value is informational only
	system("pause");
	return 0;
}

// Demonstrates ORB binary-descriptor keypoint detection and brute-force
// Hamming matching between two views of the same scene.
// Returns 0 on success, -1 when an input image cannot be loaded.
static int test()
{
	// image matching

	// 1. Read input images (grayscale, as expected by the detector)
	std::string path_church01 = "F:/images/church01.jpg";
	cv::Mat image1 = cv::imread(path_church01, cv::IMREAD_GRAYSCALE);
	std::string path_church02 = "F:/images/church02.jpg";
	cv::Mat image2 = cv::imread(path_church02, cv::IMREAD_GRAYSCALE);

	// Guard against missing files: detectAndCompute would otherwise
	// throw on an empty matrix.
	if (image1.empty() || image2.empty()) {
		std::cout << "Error: failed to load input images!" << std::endl;
		return -1;
	}

	// 2. Define keypoint vectors and descriptors
	std::vector<cv::KeyPoint> keypoints1;
	std::vector<cv::KeyPoint> keypoints2;
	cv::Mat descriptors1;
	cv::Mat descriptors2;

	// 3. Define feature detector/descriptor
	// Construct the ORB feature object (retain at most 60 keypoints)
	cv::Ptr<cv::Feature2D> feature =
		cv::ORB::create(60);
	//   cv::BRISK::create(80);

	// 4. Keypoint detection and description
	// Detect the ORB features and compute their binary descriptors
	feature->detectAndCompute(image1, cv::noArray(), keypoints1, descriptors1);
	feature->detectAndCompute(image2, cv::noArray(), keypoints2, descriptors2);

	// Draw feature points (the rich flag also shows size and orientation)
	cv::Mat featureImage;
	cv::drawKeypoints(image1, keypoints1, featureImage, cv::Scalar(255, 255, 255), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

	// Display the corners
	cv::namedWindow("ORB");
	cv::imshow("ORB", featureImage);

	std::cout << "Number of ORB keypoints (image 1): " << keypoints1.size() << std::endl;
	std::cout << "Number of ORB keypoints (image 2): " << keypoints2.size() << std::endl;

	// to describe with FREAK (use with BRISK); requires opencv_contrib
	// feature = cv::xfeatures2d::FREAK::create();
	// feature->compute(image1, keypoints1, descriptors1);
	// feature->compute(image2, keypoints2, descriptors2); // note: image2, not image1

	// Construction of the matcher: always use the Hamming norm
	// for binary descriptors
	cv::BFMatcher matcher(cv::NORM_HAMMING);

	// Match the two image descriptors
	std::vector<cv::DMatch> matches;
	matcher.match(descriptors1, descriptors2, matches);

	// draw matches
	cv::Mat imageMatches;
	cv::drawMatches(
		image1, keypoints1, // 1st image and its keypoints
		image2, keypoints2, // 2nd image and its keypoints
		matches,            // the matches
		imageMatches,       // the image produced
		cv::Scalar(255, 255, 255),  // color of lines
		cv::Scalar(255, 255, 255),  // color of points
		std::vector<char>(),        // masks if any
		cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS | cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

	// Display the image of matches
	cv::namedWindow("ORB Matches");
	cv::imshow("ORB Matches", imageMatches);
	//   cv::namedWindow("FREAK Matches");
	//   cv::imshow("FREAK Matches", imageMatches);

	std::cout << "Number of matches: " << matches.size() << std::endl;

	cv::waitKey();
	return 0;
}

2、patches.cpp

结果

在这里插入图片描述

测试代码

#include <algorithm>
#include <iostream>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/objdetect.hpp>

static int test();

// Program entry point: runs the patch-matching demo and keeps the
// console window open afterwards (Windows-specific).
int main()
{
	const int ret = test();
	static_cast<void>(ret); // result is informational only
	system("pause");
	return 0;
}

static int test()
{
	// image matching

	// 1. Read input images
	std::string path_church02 = "F:/images/church02.jpg";
	std::string path_church01 = "F:/images/church01.jpg";
	cv::Mat image1 = cv::imread(path_church01, cv::IMREAD_GRAYSCALE);
	cv::Mat image2 = cv::imread(path_church02, cv::IMREAD_GRAYSCALE);

	// 2. Define keypoints vector
	std::vector<cv::KeyPoint> keypoints1;
	std::vector<cv::KeyPoint> keypoints2;

	// 3. Define feature detector
	cv::Ptr<cv::FeatureDetector> ptrDetector;           // generic detector
	ptrDetector = cv::FastFeatureDetector::create(80);   // we select the FAST detector

	// 4. Keypoint detection
	ptrDetector->detect(image1, keypoints1);
	ptrDetector->detect(image2, keypoints2);

	std::cout << "Number of keypoints (image 1): " << keypoints1.size() << std::endl;
	std::cout << "Number of keypoints (image 2): " << keypoints2.size() << std::endl;

	// 5. Define a square neighborhood
	const int nsize(11); // size of the neighborhood
	cv::Rect neighborhood(0, 0, nsize, nsize); // 11x11
	cv::Mat patch1;
	cv::Mat patch2;

	// 6. For all keypoints in first image
	//    find best match in second image
	cv::Mat result;
	std::vector<cv::DMatch> matches;

	//for all keypoints in image 1
	for (int i = 0; i < keypoints1.size(); i++) {

		// define image patch
		neighborhood.x = keypoints1[i].pt.x - nsize / 2;
		neighborhood.y = keypoints1[i].pt.y - nsize / 2;

		// if neighborhood of points outside image, then continue with next point
		if (neighborhood.x < 0 || neighborhood.y < 0 ||
			neighborhood.x + nsize >= image1.cols || neighborhood.y + nsize >= image1.rows)
			continue;

		//patch in image 1
		patch1 = image1(neighborhood);

		// reset best correlation value;
		cv::DMatch bestMatch;

		//for all keypoints in image 2
		for (int j = 0; j < keypoints2.size(); j++) {

			// define image patch
			neighborhood.x = keypoints2[j].pt.x - nsize / 2;
			neighborhood.y = keypoints2[j].pt.y - nsize / 2;

			// if neighborhood of points outside image, then continue with next point
			if (neighborhood.x < 0 || neighborhood.y < 0 ||
				neighborhood.x + nsize >= image2.cols || neighborhood.y + nsize >= image2.rows)
				continue;

			// patch in image 2
			patch2 = image2(neighborhood);

			// match the two patches
			cv::matchTemplate(patch1, patch2, result, cv::TM_SQDIFF);

			// check if it is a best match
			if (result.at<float>(0, 0) < bestMatch.distance) {

				bestMatch.distance = result.at<float>(0, 0);
				bestMatch.queryIdx = i;
				bestMatch.trainIdx = j;
			}
		}

		// add the best match
		matches.push_back(bestMatch);
	}

	std::cout << "Number of matches: " << matches.size() << std::endl;

	// extract the 50 best matches
	std::nth_element(matches.begin(), matches.begin() + 50, matches.end());
	matches.erase(matches.begin() + 50, matches.end());

	std::cout << "Number of matches (after): " << matches.size() << std::endl;

	// Draw the matching results
	cv::Mat matchImage;
	cv::drawMatches(image1, keypoints1, // first image
		image2, keypoints2, // second image
		matches,     // vector of matches
		matchImage,  // produced image
		cv::Scalar(255, 255, 255),  // line color
		cv::Scalar(255, 255, 255)); // point color

// Display the image of matches
	cv::namedWindow("Matches");
	cv::imshow("Matches", matchImage);

	// Match template

	// define a template
	cv::Mat target(image1, cv::Rect(80, 105, 30, 30));
	// Display the template
	cv::namedWindow("Template");
	cv::imshow("Template", target);

	// define search region
	cv::Mat roi(image2,
		// here top half of the image
		cv::Rect(0, 0, image2.cols, image2.rows / 2));

	// perform template matching
	cv::matchTemplate(
		roi,    // search region
		target, // template
		result, // result
		cv::TM_SQDIFF); // similarity measure
	// find most similar location
	double minVal, maxVal;
	cv::Point minPt, maxPt;
	cv::minMaxLoc(result, &minVal, &maxVal, &minPt, &maxPt);

	// draw rectangle at most similar location
	// at minPt in this case
	cv::rectangle(roi, cv::Rect(minPt.x, minPt.y, target.cols, target.rows), 255);

	// Display the template
	cv::namedWindow("Best");
	cv::imshow("Best", image2);

	cv::waitKey();
	return 0;
}

3、detectObjects.cpp

结果

在这里插入图片描述
在这里插入图片描述

测试代码

#include <iostream>
#include<opencv2/opencv.hpp>

static int test()
{
	// open the positive sample images
	std::vector<cv::Mat> referenceImages;
	std::string path_stop00 = "F:/images/stopSamples/stop00.png";
	std::string path_stop01 = "F:/images/stopSamples/stop01.png";
	std::string path_stop02 = "F:/images/stopSamples/stop02.png";
	std::string path_stop03 = "F:/images/stopSamples/stop03.png";
	std::string path_stop04 = "F:/images/stopSamples/stop04.png";
	std::string path_stop05 = "F:/images/stopSamples/stop05.png";
	std::string path_stop06 = "F:/images/stopSamples/stop06.png";
	std::string path_stop07 = "F:/images/stopSamples/stop07.png";
	referenceImages.push_back(cv::imread(path_stop00));
	referenceImages.push_back(cv::imread(path_stop01));
	referenceImages.push_back(cv::imread(path_stop02));
	referenceImages.push_back(cv::imread(path_stop03));
	referenceImages.push_back(cv::imread(path_stop04));
	referenceImages.push_back(cv::imread(path_stop05));
	referenceImages.push_back(cv::imread(path_stop06));
	referenceImages.push_back(cv::imread(path_stop07));

	// create a composite image
	cv::Mat positveImages(2 * referenceImages[0].rows, 4 * referenceImages[0].cols, CV_8UC3);
	for (int i = 0; i < 2; i++)
		for (int j = 0; j < 4; j++) {

			referenceImages[i * 2 + j].copyTo(positveImages(cv::Rect(j*referenceImages[i * 2 + j].cols, i*referenceImages[i * 2 + j].rows, referenceImages[i * 2 + j].cols, referenceImages[i * 2 + j].rows)));
		}

	cv::imshow("Positive samples", positveImages);
	std::string path_bg01 = "F:/images/stopSamples/bg01.jpg";
	cv::Mat negative = cv::imread(path_bg01);
	cv::resize(negative, negative, cv::Size(), 0.33, 0.33);
	cv::imshow("One negative sample", negative);

	std::string path_stop9 = "F:/images/stopSamples/stop9.jpg";
	cv::Mat inputImage = cv::imread(path_stop9);
	cv::resize(inputImage, inputImage, cv::Size(), 0.5, 0.5);

	cv::CascadeClassifier cascade;
	if (!cascade.load("F:/images/stopSamples/classifier/cascade.xml")) {
		std::cout << "Error when loading the cascade classfier!" << std::endl;
		return -1;
	}

	// predict the label of this image
	std::vector<cv::Rect> detections;

	cascade.detectMultiScale(inputImage, // input image 
		detections, // detection results
		1.1,        // scale reduction factor
		1,          // number of required neighbor detections
		0,          // flags (not used)
		cv::Size(48, 48),    // minimum object size to be detected
		cv::Size(128, 128)); // maximum object size to be detected

	std::cout << "detections= " << detections.size() << std::endl;
	for (int i = 0; i < detections.size(); i++)
		cv::rectangle(inputImage, detections[i], cv::Scalar(255, 255, 255), 2);

	cv::imshow("Stop sign detection", inputImage);

	// Detecting faces
	std::string path_girl = "F:/images/girl.jpg";
	cv::Mat picture = cv::imread(path_girl);
	cv::CascadeClassifier faceCascade;
	if (!faceCascade.load("D:/software/OpenCv4.5/opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml")) {
		std::cout << "Error when loading the face cascade classfier!" << std::endl;
		return -1;
	}

	faceCascade.detectMultiScale(picture, // input image 
		detections, // detection results
		1.1,        // scale reduction factor
		3,          // number of required neighbor detections
		0,          // flags (not used)
		cv::Size(48, 48),    // minimum object size to be detected
		cv::Size(128, 128)); // maximum object size to be detected

	std::cout << "detections= " << detections.size() << std::endl;
	// draw detections on image
	for (int i = 0; i < detections.size(); i++)
		cv::rectangle(picture, detections[i], cv::Scalar(255, 255, 255), 2);

	// Detecting eyes
	cv::CascadeClassifier eyeCascade;
	if (!eyeCascade.load("D:/software/OpenCv4.5/opencv/sources/data/haarcascades/haarcascade_eye.xml")) {
		std::cout << "Error when loading the eye cascade classfier!" << std::endl;
		return -1;
	}

	eyeCascade.detectMultiScale(picture, // input image 
		detections, // detection results
		1.1,        // scale reduction factor
		3,          // number of required neighbor detections
		0,          // flags (not used)
		cv::Size(24, 24),    // minimum object size to be detected
		cv::Size(64, 64)); // maximum object size to be detected

	std::cout << "detections= " << detections.size() << std::endl;
	// draw detections on image
	for (int i = 0; i < detections.size(); i++)
		cv::rectangle(picture, detections[i], cv::Scalar(0, 0, 0), 2);

	cv::imshow("Detection results", picture);

	cv::waitKey();
}

// Entry point for the cascade-detection demo.
int main()
{
	// Run the demo; its return value is ignored here so the console
	// pause below always executes (Windows-specific).
	test();
	system("pause");
	return 0;
}
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值