【OpenCV Test Cases (4)】


Chapter05

1. morphology.cpp

Results

(result screenshots omitted)

Test code

#include<opencv2/opencv.hpp>

static int test()
{
	// Read input image
	std::string path_binary = "F:/images/binary.bmp";
	cv::Mat image= cv::imread(path_binary);
	if (!image.data)
		return 0; 

    // Display the image
	cv::namedWindow("Image");
	cv::imshow("Image",image);

	// Erode the image
	// with the default 3x3 structuring element (SE)
	cv::Mat eroded; // the destination image
	cv::erode(image,eroded,cv::Mat());

    // Display the eroded image
	cv::namedWindow("Eroded Image");
	cv::imshow("Eroded Image",eroded);

	// Dilate the image
	cv::Mat dilated; // the destination image
	cv::dilate(image,dilated,cv::Mat());

    // Display the dilated image
	cv::namedWindow("Dilated Image");
	cv::imshow("Dilated Image",dilated);

	// Erode the image with a larger SE
	// create a 7x7 matrix containing all 1s
	cv::Mat element(7,7,CV_8U,cv::Scalar(1));
	// erode the image with that SE
	cv::erode(image,eroded,element);

    // Display the eroded image
	cv::namedWindow("Eroded Image (7x7)");
	cv::imshow("Eroded Image (7x7)",eroded);

	// Erode the image 3 times.
	cv::erode(image,eroded,cv::Mat(),cv::Point(-1,-1),3);

    // Display the eroded image
	cv::namedWindow("Eroded Image (3 times)");
	cv::imshow("Eroded Image (3 times)",eroded);

	// Close the image
	cv::Mat element5(5,5,CV_8U,cv::Scalar(1));
	cv::Mat closed;
	cv::morphologyEx(image,closed,    // input and output images
		             cv::MORPH_CLOSE, // operator code
		             element5);       // structuring element

    // Display the closed image
	cv::namedWindow("Closed Image");
	cv::imshow("Closed Image",closed);

	// Open the image
	cv::Mat opened;
	cv::morphologyEx(image,opened,cv::MORPH_OPEN,element5);

    // Display the opened image
	cv::namedWindow("Opened Image");
	cv::imshow("Opened Image",opened);

	// explicit closing
	// 1. dilate original image
	cv::Mat result;
	cv::dilate(image, result, element5);
	// 2. in-place erosion of the dilated image
	cv::erode(result, result, element5);

	// Display the closed image
	cv::namedWindow("Closed Image (2)");
	cv::imshow("Closed Image (2)", result);

	// Close and Open the image
	cv::morphologyEx(image,image,cv::MORPH_CLOSE,element5);
	cv::morphologyEx(image,image,cv::MORPH_OPEN,element5);

    // Display the closed then opened image
	cv::namedWindow("Closed|Opened Image");
	cv::imshow("Closed|Opened Image",image);
	cv::imwrite("binaryGroup.bmp",image);

	// Read input image
	image= cv::imread(path_binary);

	// Open and Close the image
	cv::morphologyEx(image,image,cv::MORPH_OPEN,element5);
	cv::morphologyEx(image,image,cv::MORPH_CLOSE,element5);

    // Display the opened then closed image
	cv::namedWindow("Opened|Closed Image");
	cv::imshow("Opened|Closed Image",image);

	// Read input image (gray-level)
	std::string path_boldt = "F:/images/boldt.jpg";
	image = cv::imread(path_boldt,0);
	if (!image.data)
		return 0;

	// Get the gradient image using a 3x3 structuring element
	cv::morphologyEx(image, result, cv::MORPH_GRADIENT, cv::Mat());

	// Display the morphological edge image
	cv::namedWindow("Edge Image");
	cv::imshow("Edge Image", 255 - result);

	// Apply threshold to obtain a binary image
	int threshold(80);
	cv::threshold(result, result,
					threshold, 255, cv::THRESH_BINARY);

	// Display the thresholded edge image
	cv::namedWindow("Thresholded Edge Image");
	cv::imshow("Thresholded Edge Image", result);

	// Read input image (gray-level)
	std::string path_book = "F:/images/book.jpg";
	image = cv::imread(path_book, 0);
	if (!image.data)
		return 0;
	// rotate the image for easier display
	cv::transpose(image, image);
	cv::flip(image, image, 0);

	// Apply the black top-hat transform using a 7x7 structuring element
	cv::Mat element7(7, 7, CV_8U, cv::Scalar(1));
	cv::morphologyEx(image, result, cv::MORPH_BLACKHAT, element7);

	// Display the black top-hat image
	cv::namedWindow("7x7 Black Top-hat Image");
	cv::imshow("7x7 Black Top-hat Image", 255-result);

	// Apply threshold to obtain a binary image
	threshold= 25;
	cv::threshold(result, result,
		threshold, 255, cv::THRESH_BINARY);

	// Display the thresholded black top-hat image
	cv::namedWindow("Thresholded Black Top-hat");
	cv::imshow("Thresholded Black Top-hat", 255 - result);

	// Apply the closing operator using a 7x7 structuring element
	cv::morphologyEx(image, result, cv::MORPH_CLOSE, element7);

	// Display the closed image
	cv::namedWindow("7x7 Closed Image");
	cv::imshow("7x7 Closed Image", 255 - result);

	cv::waitKey();
	return 0;
}

int main()
{
	test();
	system("pause");
	return 0;
}
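
Side note: the listing above builds its structuring elements as plain cv::Mat objects filled with 1s, which always gives rectangular elements. OpenCV also provides cv::getStructuringElement for rectangular, cross-shaped or elliptical elements. Below is a minimal, hedged sketch of the same closing operation with an elliptical 7x7 element; the function name, image path and window name are illustrative only and not part of the original test.

#include <opencv2/opencv.hpp>

static int testEllipticalSE()
{
	// illustrative path: replace with a local binary image
	cv::Mat image = cv::imread("F:/images/binary.bmp");
	if (!image.data)
		return 0;

	// elliptical 7x7 structuring element instead of a matrix of 1s
	cv::Mat se = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7));

	// closing with the elliptical element
	cv::Mat closed;
	cv::morphologyEx(image, closed, cv::MORPH_CLOSE, se);

	cv::imshow("Closed Image (elliptical SE)", closed);
	cv::waitKey();
	return 0;
}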

2. mserFeatures.cpp

Results

(result screenshot omitted)

Test code

#include <iostream>
#include<opencv2/opencv.hpp>
#include <vector>

static int test()
{
	// Read input image
	std::string path_building = "F:/images/building.jpg";
	cv::Mat image = cv::imread(path_building, 0);
	if (!image.data)
		return 0;

	// Display the image
	cv::namedWindow("Image");
	cv::imshow("Image", image);


	// basic MSER detector
	cv::Ptr<cv::MSER> ptrMSER = cv::MSER::create(5,     // delta value for local minima detection
		200,   // min acceptable area 
		2000); // max acceptable area

	// vector of point sets
	std::vector<std::vector<cv::Point> > points;
	// vector of rectangles
	std::vector<cv::Rect> rects;
	// detect MSER features
	ptrMSER->detectRegions(image, points, rects);

	std::cout << points.size() << " MSERs detected" << std::endl;

	// create white image
	cv::Mat output(image.size(), CV_8UC3);
	output = cv::Scalar(255, 255, 255);

	// OpenCV random number generator
	cv::RNG rng;

	// Display the MSERs in color areas
	// for each detected feature
	// reverse order to display the larger MSER first
	for (std::vector<std::vector<cv::Point> >::reverse_iterator it = points.rbegin();
		it != points.rend(); ++it) {

		// generate a random color
		cv::Vec3b c(rng.uniform(0, 254),
			rng.uniform(0, 254),
			rng.uniform(0, 254));

		std::cout << "MSER size= " << it->size() << std::endl;
		// for each point in MSER set
		for (std::vector<cv::Point>::iterator itPts = it->begin();
			itPts != it->end(); ++itPts) {
			//do not overwrite MSER pixels
			if (output.at<cv::Vec3b>(*itPts)[0] == 255) {

				output.at<cv::Vec3b>(*itPts) = c;
			}
		}
	}

	cv::namedWindow("MSER point sets");
	cv::imshow("MSER point sets", output);
	cv::imwrite("mser.bmp", output);

	// Extract and display the rectangular MSERs
	std::vector<cv::Rect>::iterator itr = rects.begin();
	std::vector<std::vector<cv::Point> >::iterator itp = points.begin();
	for (; itr != rects.end(); ++itr, ++itp) {

		// ratio test
		if (static_cast<double>(itp->size()) / itr->area() > 0.6)
			cv::rectangle(image, *itr, cv::Scalar(255), 2);
	}
	// Display the resulting image
	cv::namedWindow("Rectangular MSERs");
	cv::imshow("Rectangular MSERs", image);

	// Reload the input image
	image = cv::imread(path_building, 0);
	if (!image.data)
		return 0;
	// Extract and display the elliptic MSERs
	for (std::vector<std::vector<cv::Point> >::iterator it = points.begin();
		it != points.end(); ++it) {

		// Fit a rotated rectangle around the MSER point set
		cv::RotatedRect rr = cv::minAreaRect(*it);
		// check ellipse elongation (height/width ratio)
		float ratio = rr.size.height / rr.size.width;
		if (ratio > 0.6 && ratio < 1.6)
			cv::ellipse(image, rr, cv::Scalar(255), 2);
	}
	// Display the image
	cv::namedWindow("MSER ellipses");
	cv::imshow("MSER ellipses", image);
	cv::waitKey();
	return 0;
}

int main()
{
	test();
	system("pause");
	return 0;
}
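
Side note: the ellipses above are derived from cv::minAreaRect, i.e. the ellipse inscribed in the minimum-area rotated rectangle of each point set. A minimal, hedged alternative (not part of the original test) is to fit an ellipse to the points directly with cv::fitEllipse, which requires at least five points per region; the function name, path and window name below are illustrative only.

#include <opencv2/opencv.hpp>
#include <vector>

static int testFittedEllipses()
{
	// illustrative path: replace with a local grayscale image
	cv::Mat image = cv::imread("F:/images/building.jpg", 0);
	if (!image.data)
		return 0;

	// detect MSERs as in the listing above
	cv::Ptr<cv::MSER> ptrMSER = cv::MSER::create(5, 200, 2000);
	std::vector<std::vector<cv::Point> > points;
	std::vector<cv::Rect> rects;
	ptrMSER->detectRegions(image, points, rects);

	// fit an ellipse to each point set (cv::fitEllipse needs at least 5 points)
	for (std::size_t i = 0; i < points.size(); i++) {
		if (points[i].size() >= 5) {
			cv::RotatedRect rr = cv::fitEllipse(points[i]);
			cv::ellipse(image, rr, cv::Scalar(255), 2);
		}
	}

	cv::imshow("Fitted MSER ellipses", image);
	cv::waitKey();
	return 0;
}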

3. segment.cpp

Results

(result screenshot omitted)

Test code


#include <iostream>
#include<opencv2/opencv.hpp>
#include "watershedSegmentation.h"


static int test()
{
	// Read input image
	std::string path_group = "F:/images/group.jpg";
	cv::Mat image = cv::imread(path_group);
	if (!image.data)
		return 0;

	// Display the image
	cv::namedWindow("Original Image");
	cv::imshow("Original Image", image);

	// Get the binary map
	cv::Mat binary;
	std::string path_binary = "F:/images/binary.bmp";
	binary = cv::imread(path_binary, 0);

	// Display the binary image
	cv::namedWindow("Binary Image");
	cv::imshow("Binary Image", binary);

	// Eliminate noise and smaller objects
	cv::Mat fg;
	cv::erode(binary, fg, cv::Mat(), cv::Point(-1, -1), 4);

	// Display the foreground image
	cv::namedWindow("Foreground Image");
	cv::imshow("Foreground Image", fg);

	// Identify image pixels without objects
	cv::Mat bg;
	cv::dilate(binary, bg, cv::Mat(), cv::Point(-1, -1), 4);
	cv::threshold(bg, bg, 1, 128, cv::THRESH_BINARY_INV);

	// Display the background image
	cv::namedWindow("Background Image");
	cv::imshow("Background Image", bg);

	// Show markers image
	cv::Mat markers(binary.size(), CV_8U, cv::Scalar(0));
	markers = fg + bg;
	cv::namedWindow("Markers");
	cv::imshow("Markers", markers);

	// Create watershed segmentation object
	WatershedSegmenter segmenter;

	// Set markers and process
	segmenter.setMarkers(markers);
	segmenter.process(image);

	// Display segmentation result
	cv::namedWindow("Segmentation");
	cv::imshow("Segmentation", segmenter.getSegmentation());

	// Display watersheds
	cv::namedWindow("Watersheds");
	cv::imshow("Watersheds", segmenter.getWatersheds());

	// Open another image
	std::string path_tower = "F:/images/tower.jpg";
	image = cv::imread(path_tower);

	// Identify background pixels
	cv::Mat imageMask(image.size(), CV_8U, cv::Scalar(0));
	cv::rectangle(imageMask, cv::Point(5, 5), cv::Point(image.cols - 5, image.rows - 5), cv::Scalar(255), 3);
	// Identify foreground pixels (in the middle of the image)
	cv::rectangle(imageMask, cv::Point(image.cols / 2 - 10, image.rows / 2 - 10),
		cv::Point(image.cols / 2 + 10, image.rows / 2 + 10), cv::Scalar(1), 10);

	// Set markers and process
	segmenter.setMarkers(imageMask);
	segmenter.process(image);

	// Display the image with markers
	cv::rectangle(image, cv::Point(5, 5), cv::Point(image.cols - 5, image.rows - 5), cv::Scalar(255, 255, 255), 3);
	cv::rectangle(image, cv::Point(image.cols / 2 - 10, image.rows / 2 - 10),
		cv::Point(image.cols / 2 + 10, image.rows / 2 + 10), cv::Scalar(1, 1, 1), 10);
	cv::namedWindow("Image with marker");
	cv::imshow("Image with marker", image);

	// Display watersheds
	cv::namedWindow("Watershed");
	cv::imshow("Watershed", segmenter.getWatersheds());

	cv::waitKey();
	return 0;
}


int main()
{
	test();
	system("pause");
	return 0;
}
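
Side note: the markers above are built by hand from an eroded foreground and a dilated-and-inverted background. As a hedged sketch of an alternative (not part of the original test), cv::connectedComponents can give every eroded blob its own integer label, which lets cv::watershed separate touching objects; the function name and paths below are illustrative only.

#include <opencv2/opencv.hpp>

static int testLabelledMarkers()
{
	// illustrative paths: replace with local images
	cv::Mat image = cv::imread("F:/images/group.jpg");
	cv::Mat binary = cv::imread("F:/images/binary.bmp", 0);
	if (!image.data || !binary.data)
		return 0;

	// sure foreground: heavily eroded binary map
	cv::Mat fg;
	cv::erode(binary, fg, cv::Mat(), cv::Point(-1, -1), 4);

	// give each foreground blob a distinct positive label (CV_32S)
	cv::Mat markers;
	int nLabels = cv::connectedComponents(fg, markers);

	// sure background: pixels far from any object get one extra label
	cv::Mat bg;
	cv::dilate(binary, bg, cv::Mat(), cv::Point(-1, -1), 4);
	markers.setTo(cv::Scalar(nLabels), bg == 0);

	// remaining pixels keep label 0 and are resolved by the watershed
	cv::watershed(image, markers);

	// watershed lines are labelled -1
	cv::imshow("Watershed lines", markers == -1);
	cv::waitKey();
	return 0;
}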

#include "watershedSegmentation.h"头文件

#if !defined WATERSHS
#define WATERSHS
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
class WatershedSegmenter {
private:
	cv::Mat markers;
public:
	void setMarkers(const cv::Mat& markerImage) {
		// Convert to image of ints
		markerImage.convertTo(markers, CV_32S);
	}
	cv::Mat process(const cv::Mat &image) {
		// Apply watershed
		cv::watershed(image, markers);
		return markers;
	}
	// Return result in the form of an image
	cv::Mat getSegmentation() {
		cv::Mat tmp;
		// all segments with a label higher than 255
		// will be assigned value 255
		markers.convertTo(tmp, CV_8U);
		return tmp;
	}

	// Return watershed in the form of an image
	cv::Mat getWatersheds() {
		cv::Mat tmp;
		markers.convertTo(tmp, CV_8U, 255, 255);
		return tmp;
	}
};
#endif
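
A note on the label convention used by this header: cv::watershed marks the pixels lying on watershed lines with the label -1 and leaves the basin label everywhere else, which is what getWatersheds exploits through its convertTo scale and offset. A more explicit variant, given here only as a hedged sketch of a method one could add to WatershedSegmenter, is to compare the markers against -1:

	// Hypothetical extra method for WatershedSegmenter (not in the original header):
	// return a binary mask of the watershed lines
	cv::Mat getBoundaryMask() {
		// markers == -1 yields a CV_8U mask: 255 on the lines, 0 elsewhere
		return markers == -1;
	}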

Chapter06

1. derivatives.cpp

Results

(result screenshots omitted)

Test code

#include <iostream>
#include <iomanip>
#include<opencv2/opencv.hpp>
#include "laplacianZC.h"

static int test()
{
	// Read input image
	std::string path_boldt = "F:/images/boldt.jpg";
	cv::Mat image = cv::imread(path_boldt, 0);
	if (!image.data)
		return 0;

	// Display the image
	cv::namedWindow("Original Image");
	cv::imshow("Original Image", image);

	// Compute Sobel X derivative
	cv::Mat sobelX;
	cv::Sobel(image,  // input
		sobelX,    // output
		CV_8U,     // image type
		1, 0,      // kernel specification
		3,         // size of the square kernel 
		0.4, 128); // scale and offset

	// Display the image
	cv::namedWindow("Sobel X Image");
	cv::imshow("Sobel X Image", sobelX);

	// Compute Sobel Y derivative
	cv::Mat sobelY;
	cv::Sobel(image,  // input
		sobelY,    // output
		CV_8U,     // image type
		0, 1,      // kernel specification
		3,         // size of the square kernel 
		0.4, 128); // scale and offset

	// Display the image
	cv::namedWindow("Sobel Y Image");
	cv::imshow("Sobel Y Image", sobelY);

	// Compute norm of Sobel
	cv::Sobel(image, sobelX, CV_16S, 1, 0);
	cv::Sobel(image, sobelY, CV_16S, 0, 1);
	cv::Mat sobel;
	//compute the L1 norm
	sobel = abs(sobelX) + abs(sobelY);

	double sobmin, sobmax;
	cv::minMaxLoc(sobel, &sobmin, &sobmax);
	std::cout << "sobel value range: " << sobmin << "  " << sobmax << std::endl;

	// Compute Sobel X derivative (7x7)
	cv::Sobel(image, sobelX, CV_8U, 1, 0, 7, 0.001, 128);

	// Display the image
	cv::namedWindow("Sobel X Image (7x7)");
	cv::imshow("Sobel X Image (7x7)", sobelX);

	// Print window pixel values
	for (int i = 0; i < 12; i++) {
		for (int j = 0; j < 12; j++)
			std::cout << std::setw(5) << static_cast<int>(sobel.at<short>(i + 79, j + 215)) << " ";
		std::cout << std::endl;
	}
	std::cout << std::endl;
	std::cout << std::endl;
	std::cout << std::endl;

	// Conversion to 8-bit image
	// sobelImage = -alpha*sobel + 255
	cv::Mat sobelImage;
	sobel.convertTo(sobelImage, CV_8U, -255. / sobmax, 255);

	// Display the image
	cv::namedWindow("Sobel Image");
	cv::imshow("Sobel Image", sobelImage);

	// Apply threshold to Sobel norm (low threshold value)
	cv::Mat sobelThresholded;
	cv::threshold(sobelImage, sobelThresholded, 225, 255, cv::THRESH_BINARY);

	// Display the image
	cv::namedWindow("Binary Sobel Image (low)");
	cv::imshow("Binary Sobel Image (low)", sobelThresholded);

	// Apply threshold to Sobel norm (high threshold value)
	cv::threshold(sobelImage, sobelThresholded, 190, 255, cv::THRESH_BINARY);

	// Display the image
	cv::namedWindow("Binary Sobel Image (high)");
	cv::imshow("Binary Sobel Image (high)", sobelThresholded);


	// Compute Laplacian 3x3
	cv::Mat laplace;
	cv::Laplacian(image, laplace, CV_8U, 1, 1, 128);

	// Display the image
	cv::namedWindow("Laplacian Image");
	cv::imshow("Laplacian Image", laplace);

	int cx(238), cy(90);
	int dx(12), dy(12);

	// Extract small window
	cv::Mat window(image, cv::Rect(cx, cy, dx, dy));
	cv::namedWindow("Image window");
	cv::imshow("Image window", window);
	cv::imwrite("window.bmp", window);

	// Compute Laplacian using LaplacianZC class
	LaplacianZC laplacian;
	laplacian.setAperture(7);
	cv::Mat flap = laplacian.computeLaplacian(image);

	// display min and max values of the Laplacian
	double lapmin, lapmax;
	cv::minMaxLoc(flap, &lapmin, &lapmax);

	// display laplacian image
	laplace = laplacian.getLaplacianImage();
	cv::namedWindow("Laplacian Image (7x7)");
	cv::imshow("Laplacian Image (7x7)", laplace);

	// Print image values
	std::cout << std::endl;
	std::cout << "Image values:\n\n";
	for (int i = 0; i < dx; i++) {
		for (int j = 0; j < dy; j++)
			std::cout << std::setw(5) << static_cast<int>(image.at<uchar>(i + cy, j + cx)) << " ";
		std::cout << std::endl;
	}
	std::cout << std::endl;

	// Print Laplacian values
	std::cout << "Laplacian value range=[" << lapmin << "," << lapmax << "]\n";
	std::cout << std::endl;
	for (int i = 0; i < dx; i++) {
		for (int j = 0; j < dy; j++)
			std::cout << std::setw(5) << static_cast<int>(flap.at<float>(i + cy, j + cx) / 100) << " ";
		std::cout << std::endl;
	}
	std::cout << std::endl;

	// Compute and display the zero-crossing points
	cv::Mat zeros;
	zeros = laplacian.getZeroCrossings(flap);
	cv::namedWindow("Zero-crossings");
	cv::imshow("Zero-crossings", 255 - zeros);

	// Print window pixel values
	std::cout << "Zero values:\n\n";
	for (int i = 0; i < dx; i++) {
		for (int j = 0; j < dy; j++)
			std::cout << std::setw(2) << static_cast<int>(zeros.at<uchar>(i + cy, j + cx)) / 255 << " ";
		std::cout << std::endl;
	}

	// down-sample and up-sample the image
	cv::Mat reduced, rescaled;
	cv::pyrDown(image, reduced);
	cv::pyrUp(reduced, rescaled);

	// Display the rescaled image
	cv::namedWindow("Rescaled Image");
	cv::imshow("Rescaled Image", rescaled);

	// compute a difference of Gaussians pyramid
	cv::Mat dog;
	cv::subtract(rescaled, image, dog, cv::Mat(), CV_16S);
	cv::Mat dogImage;
	dog.convertTo(dogImage, CV_8U, 1.0, 128);

	// Display the DoG image
	cv::namedWindow("DoG Image (from pyrdown/pyrup)");
	cv::imshow("DoG Image (from pyrdown/pyrup)", dogImage);

	// Apply two Gaussian filters
	cv::Mat gauss05;
	cv::Mat gauss15;
	cv::GaussianBlur(image, gauss05, cv::Size(), 0.5);
	cv::GaussianBlur(image, gauss15, cv::Size(), 1.5);

	// compute a difference of Gaussians 
	cv::subtract(gauss15, gauss05, dog, cv::Mat(), CV_16S);
	dog.convertTo(dogImage, CV_8U, 2.0, 128);

	// Display the DoG image
	cv::namedWindow("DoG Image");
	cv::imshow("DoG Image", dogImage);

	// Apply two Gaussian filters
	cv::Mat gauss20;
	cv::GaussianBlur(image, gauss20, cv::Size(), 2.0);
	cv::Mat gauss22;
	cv::GaussianBlur(image, gauss22, cv::Size(), 2.2);

	// compute a difference of Gaussians 
	cv::subtract(gauss22, gauss20, dog, cv::Mat(), CV_32F);
	dog.convertTo(dogImage, CV_8U, 10.0, 128);

	// Display the DoG image
	cv::namedWindow("DoG Image (2)");
	cv::imshow("DoG Image (2)", dogImage);

	// Display the zero-crossings of DoG 
	zeros = laplacian.getZeroCrossings(dog);
	cv::namedWindow("Zero-crossings of DoG");
	cv::imshow("Zero-crossings of DoG", 255 - zeros);

	// Display the image with window
	cv::rectangle(image, cv::Rect(cx, cy, dx, dy), cv::Scalar(255, 255, 255));
	cv::namedWindow("Original Image with window");
	cv::imshow("Original Image with window", image);

	cv::waitKey();
	return 0;
}

int main()
{
	test();
	system("pause");
	return 0;
}
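
Side note: the listing above approximates the gradient norm with the L1 norm |sobelX| + |sobelY|. A minimal, hedged sketch of the L2 (Euclidean) alternative using cv::magnitude on floating-point derivatives is given below; the function name, image path and window name are illustrative only and not part of the original test.

#include <opencv2/opencv.hpp>

static int testSobelMagnitude()
{
	// illustrative path: replace with a local grayscale image
	cv::Mat image = cv::imread("F:/images/boldt.jpg", 0);
	if (!image.data)
		return 0;

	// floating-point Sobel derivatives
	cv::Mat sobelX, sobelY;
	cv::Sobel(image, sobelX, CV_32F, 1, 0);
	cv::Sobel(image, sobelY, CV_32F, 0, 1);

	// L2 norm of the gradient
	cv::Mat magnitude;
	cv::magnitude(sobelX, sobelY, magnitude);

	// rescale to 8 bits for display, inverted so strong edges appear dark
	double magMax;
	cv::minMaxLoc(magnitude, 0, &magMax);
	cv::Mat display;
	magnitude.convertTo(display, CV_8U, -255.0 / magMax, 255);

	cv::imshow("Sobel L2 magnitude", display);
	cv::waitKey();
	return 0;
}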

laplacianZC.h header file



#if !defined LAPLACEZC
#define LAPLACEZC
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
class LaplacianZC {
private:
	// laplacian
	cv::Mat laplace;
	// Aperture size of the laplacian kernel
	int aperture;
public:
	LaplacianZC() : aperture(3) {}
	// Set the aperture size of the kernel
	void setAperture(int a) {
		aperture = a;
	}

	// Get the aperture size of the kernel
	int getAperture() const {
		return aperture;
	}

	// Compute the floating point Laplacian
	cv::Mat computeLaplacian(const cv::Mat& image) {
		// Compute Laplacian
		cv::Laplacian(image, laplace, CV_32F, aperture);
		return laplace;
	}

	// Get the Laplacian result in 8-bit image 
	// zero corresponds to gray level 128
	// if no scale is provided, then the max value will be
	// scaled to intensity 255
	// You must call computeLaplacian before calling this method
	cv::Mat getLaplacianImage(double scale = -1.0) {
		if (scale < 0) {
			double lapmin, lapmax;
			cv::minMaxLoc(laplace, &lapmin, &lapmax);
			scale = 127 / std::max(-lapmin, lapmax);
		}
		cv::Mat laplaceImage;
		laplace.convertTo(laplaceImage, CV_8U, scale, 128);
		return laplaceImage;
	}
	// Get a binary image of the zero-crossings
	// laplacian image should be CV_32F
	cv::Mat getZeroCrossings(cv::Mat laplace) {
		// threshold at 0
		// negative values in black
		// positive values in white
		cv::Mat signImage;
		cv::threshold(laplace, signImage, 0, 255, cv::THRESH_BINARY);
		// convert the +/- image into CV_8U
		cv::Mat binary;
		signImage.convertTo(binary, CV_8U);
		// dilate the binary image of +/- regions
		cv::Mat dilated;
		cv::dilate(binary, dilated, cv::Mat());
		// return the zero-crossing contours
		return dilated - binary;
	}
};

#endif
