Chapter06
2、filter.cpp
结果
测试代码
#include <iostream>
#include<opencv2/opencv.hpp>
// Filtering demo (OpenCV cookbook, ch. 6): mean / Gaussian / median blur,
// inspection of Gaussian and derivative kernels, naive vs. low-pass-filtered
// 4x downsampling, and an image pyramid. Results are shown in HighGUI
// windows; the function blocks on cv::waitKey() and always returns 0
// (also when an input image cannot be loaded from the hard-coded paths).
static int test()
{
// Read input image
std::string path_boldt = "F:/images/boldt.jpg";
// flag 0 = load as single-channel grayscale
cv::Mat image = cv::imread(path_boldt, 0);
if (!image.data)
return 0;
// Display the image
cv::namedWindow("Original Image");
cv::imshow("Original Image", image);
// Blur the image with a mean filter
cv::Mat result;
cv::blur(image, result, cv::Size(5, 5));
// Display the blurred image
cv::namedWindow("Mean filtered Image");
cv::imshow("Mean filtered Image", result);
// Blur the image with a mean filter 9x9
cv::blur(image, result, cv::Size(9, 9));
// Display the blurred image
cv::namedWindow("Mean filtered Image (9x9)");
cv::imshow("Mean filtered Image (9x9)", result);
// Gaussian Blur the image
cv::GaussianBlur(image, result,
cv::Size(5, 5), // size of the filter
1.5); // parameter controlling the
// shape of the Gaussian
// Display the blurred image
cv::namedWindow("Gaussian filtered Image");
cv::imshow("Gaussian filtered Image", result);
cv::GaussianBlur(image, result, cv::Size(9, 9), 1.7);
// Display the blurred image
cv::namedWindow("Gaussian filtered Image (9x9)");
cv::imshow("Gaussian filtered Image (9x9)", result);
// Get the gaussian kernel (1.5)
// 1-D kernel of 9 float coefficients for sigma = 1.5
cv::Mat gauss = cv::getGaussianKernel(9, 1.5, CV_32F);
// Display kernel values
// (iterate over the 9 coefficients of the single-column kernel)
cv::Mat_<float>::const_iterator it = gauss.begin<float>();
cv::Mat_<float>::const_iterator itend = gauss.end<float>();
std::cout << "1.5 = [";
for (; it != itend; ++it) {
std::cout << *it << " ";
}
std::cout << "]" << std::endl;
// Get the gaussian kernel (0.5)
gauss = cv::getGaussianKernel(9, 0.5, CV_32F);
// Display kernel values
it = gauss.begin<float>();
itend = gauss.end<float>();
std::cout << "0.5 = [";
for (; it != itend; ++it) {
std::cout << *it << " ";
}
std::cout << "]" << std::endl;
// Get the gaussian kernel (2.5)
gauss = cv::getGaussianKernel(9, 2.5, CV_32F);
// Display kernel values
it = gauss.begin<float>();
itend = gauss.end<float>();
std::cout << "2.5 = [";
for (; it != itend; ++it) {
std::cout << *it << " ";
}
std::cout << "]" << std::endl;
// Get the gaussian kernel (9 elements)
// sigma = -1: OpenCV derives sigma from the kernel size (9)
gauss = cv::getGaussianKernel(9, -1, CV_32F);
// Display kernel values
it = gauss.begin<float>();
itend = gauss.end<float>();
std::cout << "9 = [";
for (; it != itend; ++it) {
std::cout << *it << " ";
}
std::cout << "]" << std::endl;
// Get the Deriv kernel (2.5)
// 2nd-order x and y derivative (Sobel) kernels, aperture 7, normalized
cv::Mat kx, ky;
cv::getDerivKernels(kx, ky, 2, 2, 7, true);
// Display kernel values
cv::Mat_<float>::const_iterator kit = kx.begin<float>();
cv::Mat_<float>::const_iterator kitend = kx.end<float>();
std::cout << "[";
for (; kit != kitend; ++kit) {
std::cout << *kit << " ";
}
std::cout << "]" << std::endl;
// Read input image with salt&pepper noise
std::string path_salted = "F:/images/salted.bmp";
image = cv::imread(path_salted, 0);
if (!image.data)
return 0;
// Display the S&P image
cv::namedWindow("S&P Image");
cv::imshow("S&P Image", image);
// Blur the image with a mean filter
cv::blur(image, result, cv::Size(5, 5));
// Display the blurred image
cv::namedWindow("Mean filtered S&P Image");
cv::imshow("Mean filtered S&P Image", result);
// Applying a median filter
// (the median is much better than the mean at removing salt & pepper dots)
cv::medianBlur(image, result, 5);
// Display the blurred image
cv::namedWindow("Median filtered Image");
cv::imshow("Median filtered Image", result);
// Reduce by 4 the size of the image (the wrong way)
image = cv::imread(path_boldt, 0);
cv::Mat reduced(image.rows / 4, image.cols / 4, CV_8U);
// plain 4x subsampling without low-pass filtering -> aliasing artifacts
for (int i = 0; i < reduced.rows; i++)
for (int j = 0; j < reduced.cols; j++)
reduced.at<uchar>(i, j) = image.at<uchar>(i * 4, j * 4);
// Display the reduced image
cv::namedWindow("Badly reduced Image");
cv::imshow("Badly reduced Image", reduced);
// blow the thumbnail back up (nearest neighbour) for side-by-side viewing
cv::resize(reduced, reduced, cv::Size(), 4, 4, cv::INTER_NEAREST);
// Display the (resized) reduced image
cv::namedWindow("Badly reduced");
cv::imshow("Badly reduced", reduced);
cv::imwrite("badlyreducedimage.bmp", reduced);
// first remove high frequency component
// (low-pass filter so the 4x subsampling below does not alias)
cv::GaussianBlur(image, image, cv::Size(11, 11), 1.75);
// keep only 1 of every 4 pixels
cv::Mat reduced2(image.rows / 4, image.cols / 4, CV_8U);
for (int i = 0; i < reduced2.rows; i++)
for (int j = 0; j < reduced2.cols; j++)
reduced2.at<uchar>(i, j) = image.at<uchar>(i * 4, j * 4);
// Display the reduced image
cv::namedWindow("Reduced Image, original size");
cv::imshow("Reduced Image, original size", reduced2);
cv::imwrite("reducedimage.bmp", reduced2);
// resizing with NN
cv::Mat newImage;
cv::resize(reduced2, newImage, cv::Size(), 4, 4, cv::INTER_NEAREST);
// Display the (resized) reduced image
cv::namedWindow("Reduced Image");
cv::imshow("Reduced Image", newImage);
// resizing with bilinear
cv::resize(reduced2, newImage, cv::Size(), 4, 4, cv::INTER_LINEAR);
// Display the (resized) reduced image
cv::namedWindow("Bilinear resizing");
cv::imshow("Bilinear resizing", newImage);
// Creating an image pyramid
// canvas wide enough for the full image plus 1/2, 1/4 and 1/8 scale copies
cv::Mat pyramid(image.rows, image.cols + image.cols / 2 + image.cols / 4 + image.cols / 8, CV_8U, cv::Scalar(255));
image.copyTo(pyramid(cv::Rect(0, 0, image.cols, image.rows)));
cv::pyrDown(image, reduced); // reduce image size by half
reduced.copyTo(pyramid(cv::Rect(image.cols, image.rows / 2, image.cols / 2, image.rows / 2)));
cv::pyrDown(reduced, reduced2); // reduce image size by another half
reduced2.copyTo(pyramid(cv::Rect(image.cols + image.cols / 2, image.rows - image.rows / 4, image.cols / 4, image.rows / 4)));
cv::pyrDown(reduced2, reduced); // reduce image size by another half
reduced.copyTo(pyramid(cv::Rect(image.cols + image.cols / 2 + image.cols / 4, image.rows - image.rows / 8, image.cols / 8, image.rows / 8)));
// Display the pyramid
cv::namedWindow("Pyramid of images");
cv::imshow("Pyramid of images", pyramid);
cv::waitKey();
return 0;
}
// Entry point: run the filtering demo, then keep the console open.
int main()
{
    const int status = test();
    (void)status; // the demo always yields 0; exit code is fixed below
    system("pause"); // Windows-only: wait for a key before the window closes
    return 0;
}
Chapter07
1、blobs.cpp
结果
测试代码
#include <iostream>
#include <vector>
#include<opencv2/opencv.hpp>
// Connected-components / shape-descriptor demo (OpenCV cookbook, ch. 7):
// contour extraction and length filtering, bounding box, enclosing circle,
// polygon approximation, convex hull, moments (mass centres), and
// quadrilateral detection on an MSER image. Shows results in HighGUI
// windows, blocks on cv::waitKey() and always returns 0.
static int test()
{
// Read input binary image
std::string path_binaryGroup = "F:/images/binaryGroup.bmp";
cv::Mat image = cv::imread(path_binaryGroup, 0);
if (!image.data)
return 0;
cv::namedWindow("Binary Image");
cv::imshow("Binary Image", image);
// Get the contours of the connected components
// NOTE: cv::findContours may modify its input image, hence the re-reads below.
std::vector<std::vector<cv::Point> > contours;
cv::findContours(image,
contours, // a vector of contours
cv::RETR_EXTERNAL, // retrieve the external contours
cv::CHAIN_APPROX_NONE); // retrieve all pixels of each contours
// Print contours' length
std::cout << "Contours: " << contours.size() << std::endl;
std::vector<std::vector<cv::Point> >::const_iterator itContours = contours.begin();
for (; itContours != contours.end(); ++itContours) {
std::cout << "Size: " << itContours->size() << std::endl;
}
// draw black contours on white image
cv::Mat result(image.size(), CV_8U, cv::Scalar(255));
cv::drawContours(result, contours,
-1, // draw all contours
cv::Scalar(0), // in black
2); // with a thickness of 2
cv::namedWindow("Contours");
cv::imshow("Contours", result);
// Eliminate too short or too long contours
int cmin = 50; // minimum contour length
int cmax = 500; // maximum contour length
// erase-while-iterating: erase() returns the next valid iterator
std::vector<std::vector<cv::Point> >::iterator itc = contours.begin();
while (itc != contours.end()) {
if (itc->size() < cmin || itc->size() > cmax)
itc = contours.erase(itc);
else
++itc;
}
// draw contours on the original image
std::string path_group = "F:/images/group.jpg";
cv::Mat original = cv::imread(path_group);
cv::drawContours(original, contours,
-1, // draw all contours
cv::Scalar(255, 255, 255), // in white
2); // with a thickness of 2
cv::namedWindow("Contours on Animals");
cv::imshow("Contours on Animals", original);
// Let's now draw black contours on white image
result.setTo(cv::Scalar(255));
cv::drawContours(result, contours,
-1, // draw all contours
0, // in black
1); // with a thickness of 1
image = cv::imread(path_binaryGroup, 0);
// testing the bounding box
// NOTE(review): the descriptor code below indexes contours[0..3] and assumes
// at least 4 contours survived the length filter -- verify for other inputs.
cv::Rect r0 = cv::boundingRect(contours[0]);
// draw the rectangle
cv::rectangle(result, r0, 0, 2);
// testing the enclosing circle
float radius;
cv::Point2f center;
cv::minEnclosingCircle(contours[1], center, radius);
// draw the circle
cv::circle(result, center, static_cast<int>(radius), 0, 2);
// testing the approximate polygon
std::vector<cv::Point> poly;
cv::approxPolyDP(contours[2], poly, 5, true);
// draw the polygon
cv::polylines(result, poly, true, 0, 2);
std::cout << "Polygon size: " << poly.size() << std::endl;
// testing the convex hull
std::vector<cv::Point> hull;
cv::convexHull(contours[3], hull);
// draw the polygon
cv::polylines(result, hull, true, 0, 2);
std::vector<cv::Vec4i> defects;
// cv::convexityDefects(contours[3], hull, defects);
// testing the moments
// iterate over all contours
itc = contours.begin();
while (itc != contours.end()) {
// compute all moments
cv::Moments mom = cv::moments(*itc++);
// draw mass center
cv::circle(result,
// position of mass center converted to integer (m10/m00, m01/m00)
cv::Point(mom.m10 / mom.m00, mom.m01 / mom.m00),
2, cv::Scalar(0), 2); // draw black dot
}
cv::namedWindow("Some Shape descriptors");
cv::imshow("Some Shape descriptors", result);
// New call to findContours but with RETR_LIST flag
image = cv::imread(path_binaryGroup, 0);
// Get the contours of the connected components
cv::findContours(image,
contours, // a vector of contours
cv::RETR_LIST, // retrieve the external and internal contours
cv::CHAIN_APPROX_NONE); // retrieve all pixels of each contours
// draw black contours on white image
result.setTo(255);
cv::drawContours(result, contours,
-1, // draw all contours
0, // in black
2); // with a thickness of 2
cv::namedWindow("All Contours");
cv::imshow("All Contours", result);
// get a MSER image
cv::Mat components;
std::string path_mser = "F:/images/mser.bmp";
components = cv::imread(path_mser, 0);
// create a binary version
// (pixels equal to 255 become 255, all others become 0)
components = components == 255;
// open the image (white background)
// 3 iterations of morphological opening to clean the mask
cv::morphologyEx(components, components, cv::MORPH_OPEN, cv::Mat(), cv::Point(-1, -1), 3);
cv::namedWindow("MSER image");
cv::imshow("MSER image", components);
contours.clear();
//invert image (background must be black)
cv::Mat componentsInv = 255 - components;
// Get the contours of the connected components
cv::findContours(componentsInv,
contours, // a vector of contours
cv::RETR_EXTERNAL, // retrieve the external contours
cv::CHAIN_APPROX_NONE); // retrieve all pixels of each contours
// white image
cv::Mat quadri(components.size(), CV_8U, 255);
// for all contours
std::vector<std::vector<cv::Point> >::iterator it = contours.begin();
while (it != contours.end()) {
poly.clear();
// approximate contour by polygon
cv::approxPolyDP(*it, poly, 5, true);
// do we have a quadrilateral?
if (poly.size() == 4) {
// draw it
cv::polylines(quadri, poly, true, 0, 2);
}
++it;
}
cv::namedWindow("MSER quadrilateral");
cv::imshow("MSER quadrilateral", quadri);
cv::waitKey();
return 0;
}
// Entry point: run the blobs demo, then keep the console open.
int main()
{
    const int status = test();
    (void)status; // the demo always yields 0; exit code is fixed below
    system("pause"); // Windows-only: wait for a key before the window closes
    return 0;
}
2、contours.cpp
结果
测试代码
#include <iostream>
#include <vector>
#include<opencv2/opencv.hpp>
#include "linefinder.h"
#include "edgedetector.h"
// Line / circle detection demo (OpenCV cookbook, ch. 7): Sobel edge maps,
// Canny contours, standard and probabilistic Hough transforms, least-squares
// line fitting, a hand-built Hough accumulator illustration, and circle
// detection with cv::HoughCircles. PI is the constant declared in
// linefinder.h. Blocks on cv::waitKey() and always returns 0.
static int test()
{
// Read input image
std::string path_road = "F:/images/road.jpg";
cv::Mat image = cv::imread(path_road, 0);
if (!image.data)
return 0;
// Display the image
cv::namedWindow("Original Image");
cv::imshow("Original Image", image);
// Compute Sobel
EdgeDetector ed;
ed.computeSobel(image);
// Display the Sobel orientation
cv::namedWindow("Sobel (orientation)");
cv::imshow("Sobel (orientation)", ed.getSobelOrientationImage());
cv::imwrite("ori.bmp", ed.getSobelOrientationImage());
// Display the Sobel low threshold
cv::namedWindow("Sobel (low threshold)");
cv::imshow("Sobel (low threshold)", ed.getBinaryMap(125));
// Display the Sobel high threshold
cv::namedWindow("Sobel (high threshold)");
cv::imshow("Sobel (high threshold)", ed.getBinaryMap(350));
// Apply Canny algorithm
// (125/350 match the low/high Sobel thresholds shown above)
cv::Mat contours;
cv::Canny(image, contours, 125, 350);
// Display the image of contours
cv::namedWindow("Canny Contours");
cv::imshow("Canny Contours", 255 - contours);
// Create a test image
// (this local cv::Mat named "test" shadows the function name in this scope)
cv::Mat test(200, 200, CV_8U, cv::Scalar(0));
cv::line(test, cv::Point(100, 0), cv::Point(200, 200), cv::Scalar(255));
cv::line(test, cv::Point(0, 50), cv::Point(200, 200), cv::Scalar(255));
cv::line(test, cv::Point(0, 200), cv::Point(200, 0), cv::Scalar(255));
cv::line(test, cv::Point(200, 0), cv::Point(0, 200), cv::Scalar(255));
cv::line(test, cv::Point(100, 0), cv::Point(100, 200), cv::Scalar(255));
cv::line(test, cv::Point(0, 100), cv::Point(200, 100), cv::Scalar(255));
// Display the test image
cv::namedWindow("Test Image");
cv::imshow("Test Image", test);
cv::imwrite("test.bmp", test);
// Hough transform for line detection
// resolution: 1 pixel for rho, 1 degree for theta, at least 50 votes
std::vector<cv::Vec2f> lines;
cv::HoughLines(contours, lines, 1, PI / 180, 50);
// Draw the lines
cv::Mat result(contours.rows, contours.cols, CV_8U, cv::Scalar(255));
image.copyTo(result);
std::cout << "Lines detected: " << lines.size() << std::endl;
std::vector<cv::Vec2f>::const_iterator it = lines.begin();
while (it != lines.end()) {
float rho = (*it)[0]; // first element is distance rho
float theta = (*it)[1]; // second element is angle theta
if (theta < PI / 4. || theta > 3.*PI / 4.) { // ~vertical line
// point of intersection of the line with first row
// (solves rho = x*cos(theta) + y*sin(theta) for y = 0)
cv::Point pt1(rho / cos(theta), 0);
// point of intersection of the line with last row
cv::Point pt2((rho - result.rows*sin(theta)) / cos(theta), result.rows);
// draw a white line
cv::line(result, pt1, pt2, cv::Scalar(255), 1);
}
else { // ~horizontal line
// point of intersection of the line with first column
cv::Point pt1(0, rho / sin(theta));
// point of intersection of the line with last column
cv::Point pt2(result.cols, (rho - result.cols*cos(theta)) / sin(theta));
// draw a white line
cv::line(result, pt1, pt2, cv::Scalar(255), 1);
}
std::cout << "line: (" << rho << "," << theta << ")\n";
++it;
}
// Display the detected line image
cv::namedWindow("Lines with Hough");
cv::imshow("Lines with Hough", result);
// Create LineFinder instance
LineFinder ld;
// Set probabilistic Hough parameters
ld.setLineLengthAndGap(100, 20);
ld.setMinVote(60);
// Detect lines
std::vector<cv::Vec4i> li = ld.findLines(contours);
ld.drawDetectedLines(image);
cv::namedWindow("Lines with HoughP");
cv::imshow("Lines with HoughP", image);
std::vector<cv::Vec4i>::const_iterator it2 = li.begin();
while (it2 != li.end()) {
std::cout << "(" << (*it2)[0] << "," << (*it2)[1] << ")-("
<< (*it2)[2] << "," << (*it2)[3] << ")" << std::endl;
++it2;
}
// Display one line
// NOTE(review): li[0] is accessed without checking that any segment was found.
image = cv::imread(path_road, 0);
int n = 0;
cv::line(image, cv::Point(li[n][0], li[n][1]), cv::Point(li[n][2], li[n][3]), cv::Scalar(255), 5);
cv::namedWindow("One line of the Image");
cv::imshow("One line of the Image", image);
// Extract the contour pixels of the first detected line
cv::Mat oneline(image.size(), CV_8U, cv::Scalar(0));
cv::line(oneline, cv::Point(li[n][0], li[n][1]), cv::Point(li[n][2], li[n][3]), cv::Scalar(255), 3);
// keep only Canny pixels that fall on (a 3-pixel-wide band around) the segment
cv::bitwise_and(contours, oneline, oneline);
cv::namedWindow("One line");
cv::imshow("One line", 255 - oneline);
std::vector<cv::Point> points;
// Iterate over the pixels to obtain all point positions
for (int y = 0; y < oneline.rows; y++) {
uchar* rowPtr = oneline.ptr<uchar>(y);
for (int x = 0; x < oneline.cols; x++) {
// if on a contour
if (rowPtr[x]) {
points.push_back(cv::Point(x, y));
}
}
}
// find the best fitting line
// DIST_L2 = plain least-squares fit; result is (vx, vy, x0, y0)
cv::Vec4f line;
cv::fitLine(points, line, cv::DIST_L2, 0, 0.01, 0.01);
std::cout << "line: (" << line[0] << "," << line[1] << ")(" << line[2] << "," << line[3] << ")\n";
int x0 = line[2]; // a point on the line
int y0 = line[3];
int x1 = x0 + 100 * line[0]; // add a vector of length 100
int y1 = y0 + 100 * line[1];
image = cv::imread(path_road, 0);
// draw the line
cv::line(image, cv::Point(x0, y0), cv::Point(x1, y1), 0, 2);
cv::namedWindow("Fitted line");
cv::imshow("Fitted line", image);
// eliminate inconsistent lines
// keep only segments agreeing with the Sobel gradient orientation
ld.removeLinesOfInconsistentOrientations(ed.getOrientation(), 0.4, 0.1);
// Display the detected line image
image = cv::imread(path_road, 0);
ld.drawDetectedLines(image);
cv::namedWindow("Detected Lines (2)");
cv::imshow("Detected Lines (2)", image);
// Create a Hough accumulator
// 200 rows (rho shifted into [0,200)) x 180 columns (theta in degrees)
cv::Mat acc(200, 180, CV_8U, cv::Scalar(0));
// Choose a point
int x = 50, y = 30;
// loop over all angles
for (int i = 0; i < 180; i++) {
double theta = i * PI / 180.;
// find corresponding rho value
double rho = x * std::cos(theta) + y * std::sin(theta);
// rho can be negative: shift by 100 and add 0.5 to round to nearest
int j = static_cast<int>(rho + 100.5);
std::cout << i << "," << j << std::endl;
// increment accumulator
acc.at<uchar>(j, i)++;
}
// draw the axes
cv::line(acc, cv::Point(0, 0), cv::Point(0, acc.rows - 1), 255);
cv::line(acc, cv::Point(acc.cols - 1, acc.rows - 1), cv::Point(0, acc.rows - 1), 255);
cv::imwrite("hough1.bmp", 255 - (acc * 100));
// Choose a second point
x = 30, y = 10;
// loop over all angles
for (int i = 0; i < 180; i++) {
double theta = i * PI / 180.;
double rho = x * cos(theta) + y * sin(theta);
int j = static_cast<int>(rho + 100.5);
acc.at<uchar>(j, i)++;
}
cv::namedWindow("Hough Accumulator");
cv::imshow("Hough Accumulator", acc * 100);
cv::imwrite("hough2.bmp", 255 - (acc * 100));
// Detect circles
std::string path_chariot = "F:/images/chariot.jpg";
image = cv::imread(path_chariot, 0);
// smooth first: HoughCircles is sensitive to noise
cv::GaussianBlur(image, image, cv::Size(5, 5), 1.5);
std::vector<cv::Vec3f> circles;
cv::HoughCircles(image, circles, cv::HOUGH_GRADIENT,
2, // accumulator resolution (size of the image / 2)
20, // minimum distance between two circles
200, // Canny high threshold
60, // minimum number of votes
15, 50); // min and max radius
std::cout << "Circles: " << circles.size() << std::endl;
// Draw the circles
image = cv::imread(path_chariot, 0);
std::vector<cv::Vec3f>::const_iterator itc = circles.begin();
while (itc != circles.end()) {
cv::circle(image,
cv::Point((*itc)[0], (*itc)[1]), // circle centre
(*itc)[2], // circle radius
cv::Scalar(255), // color
2); // thickness
++itc;
}
cv::namedWindow("Detected Circles");
cv::imshow("Detected Circles", image);
cv::waitKey();
return 0;
}
// Entry point: run the contours demo, then keep the console open.
int main()
{
    const int status = test();
    (void)status; // the demo always yields 0; exit code is fixed below
    system("pause"); // Windows-only: wait for a key before the window closes
    return 0;
}
edgedetector.h头文件
#if !defined SOBELEDGES
#define SOBELEDGES
#include<opencv2/opencv.hpp>
// Sobel-based edge detector: computes per-pixel gradient magnitude and
// orientation and exposes thresholded / displayable versions of them.
// Fix: getSobelOrientationImage now uses OpenCV's CV_PI instead of a
// constant named PI, which was only declared in linefinder.h — the header
// previously compiled only when linefinder.h happened to be included first.
class EdgeDetector {
private:
    // original image
    cv::Mat img;
    // 16-bit signed int image
    cv::Mat sobel;
    // Aperture size of the Sobel kernel
    int aperture;
    // Sobel magnitude (CV_32F, valid after computeSobel)
    cv::Mat sobelMagnitude;
    // Sobel orientation in radians (CV_32F, valid after computeSobel)
    cv::Mat sobelOrientation;
public:
    EdgeDetector() : aperture(3) {}
    // Set the aperture size of the Sobel kernel.
    void setAperture(int a) {
        aperture = a;
    }
    // Get the aperture size of the kernel.
    int getAperture() const {
        return aperture;
    }
    // Compute Sobel derivatives of image and cache gradient magnitude
    // and orientation (via cv::cartToPolar, orientation in radians).
    void computeSobel(const cv::Mat& image) {
        cv::Mat sobelX;
        cv::Mat sobelY;
        // Compute Sobel
        cv::Sobel(image, sobelX, CV_32F, 1, 0, aperture);
        cv::Sobel(image, sobelY, CV_32F, 0, 1, aperture);
        // Compute magnitude and orientation
        cv::cartToPolar(sobelX, sobelY, sobelMagnitude, sobelOrientation);
    }
    // Same as above, but also hands back the raw X and Y derivative images.
    void computeSobel(const cv::Mat& image, cv::Mat &sobelX, cv::Mat &sobelY) {
        // Compute Sobel
        cv::Sobel(image, sobelX, CV_32F, 1, 0, aperture);
        cv::Sobel(image, sobelY, CV_32F, 0, 1, aperture);
        // Compute magnitude and orientation
        cv::cartToPolar(sobelX, sobelY, sobelMagnitude, sobelOrientation);
    }
    // Get Sobel magnitude (CV_32F).
    cv::Mat getMagnitude() {
        return sobelMagnitude;
    }
    // Get Sobel orientation in radians (CV_32F).
    cv::Mat getOrientation() {
        return sobelOrientation;
    }
    // Inverse-thresholded magnitude map: magnitude below `threshold`
    // maps to 255, above to 0 (same CV_32F depth as the magnitude image).
    cv::Mat getBinaryMap(double threshold) {
        cv::Mat bin;
        cv::threshold(sobelMagnitude, bin, threshold, 255, cv::THRESH_BINARY_INV);
        return bin;
    }
    // Magnitude rescaled to CV_8U (maximum magnitude maps to 255).
    cv::Mat getSobelImage() {
        cv::Mat bin;
        double minval, maxval;
        cv::minMaxLoc(sobelMagnitude, &minval, &maxval);
        sobelMagnitude.convertTo(bin, CV_8U, 255 / maxval);
        return bin;
    }
    // Get a CV_8U image of the Sobel orientation
    // 1 gray-level = 2 degrees (radians * 90/pi)
    cv::Mat getSobelOrientationImage() {
        cv::Mat bin;
        // CV_PI comes from opencv2/core, so this header no longer depends
        // on a PI constant defined by another header being included first.
        sobelOrientation.convertTo(bin, CV_8U, 90 / CV_PI);
        return bin;
    }
};
#endif
linefinder.h头文件
#if !defined LINEF
#define LINEF
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
// Pi constant used for the default angular resolution (1 degree = PI/180)
// and the orientation math below.
// NOTE(review): truncated to 7 decimals; OpenCV's CV_PI is more precise.
const double PI = 3.1415926;
// Detects line segments with the probabilistic Hough transform
// (cv::HoughLinesP), draws them, and can discard segments whose direction
// disagrees with a per-pixel gradient-orientation map.
class LineFinder {
private:
// original image
cv::Mat img;
// vector containing the end points
// of the detected lines
std::vector<cv::Vec4i> lines;
// accumulator resolution parameters
double deltaRho;
double deltaTheta;
// minimum number of votes that a line
// must receive before being considered
int minVote;
// min length for a line
double minLength;
// max allowed gap along the line
double maxGap;
public:
// Default accumulator resolution is 1 pixel by 1 degree
// no gap, no minimum length
LineFinder() : deltaRho(1), deltaTheta(PI / 180), minVote(10), minLength(0.), maxGap(0.) {}
// Set the resolution of the accumulator
void setAccResolution(double dRho, double dTheta) {
deltaRho = dRho;
deltaTheta = dTheta;
}
// Set the minimum number of votes
void setMinVote(int minv) {
minVote = minv;
}
// Set line length and gap
void setLineLengthAndGap(double length, double gap) {
minLength = length;
maxGap = gap;
}
// Apply probabilistic Hough Transform to a binary edge map; the detected
// segments are cached in `lines` and also returned (as (x1,y1,x2,y2)).
std::vector<cv::Vec4i> findLines(cv::Mat& binary) {
lines.clear();
cv::HoughLinesP(binary, lines, deltaRho, deltaTheta, minVote, minLength, maxGap);
return lines;
}
// Draw the detected lines on an image
void drawDetectedLines(cv::Mat &image, cv::Scalar color = cv::Scalar(255, 255, 255)) {
// Draw the lines
std::vector<cv::Vec4i>::const_iterator it2 = lines.begin();
while (it2 != lines.end()) {
cv::Point pt1((*it2)[0], (*it2)[1]);
cv::Point pt2((*it2)[2], (*it2)[3]);
cv::line(image, pt1, pt2, color);
++it2;
}
}
// Eliminates lines that do not have an orientation equals to
// the ones specified in the input matrix of orientations
// At least the given percentage of pixels on the line must
// be within plus or minus delta of the corresponding orientation
// NOTE: inconsistent segments are zeroed out in place (kept in the vector
// as degenerate (0,0)-(0,0) segments), not erased.
std::vector<cv::Vec4i> removeLinesOfInconsistentOrientations(
const cv::Mat &orientations, double percentage, double delta) {
std::vector<cv::Vec4i>::iterator it = lines.begin();
// check all lines
while (it != lines.end()) {
// end points
int x1 = (*it)[0];
int y1 = (*it)[1];
int x2 = (*it)[2];
int y2 = (*it)[3];
// line orientation + 90o to get the parallel line
// NOTE(review): the +PI/2 appears to rotate the segment direction to the
// edge-normal (gradient) direction; ori1/ori2 cover both traversal senses.
double ori1 = atan2(static_cast<double>(y1 - y2), static_cast<double>(x1 - x2)) + PI / 2;
if (ori1 > PI) ori1 = ori1 - 2 * PI;
double ori2 = atan2(static_cast<double>(y2 - y1), static_cast<double>(x2 - x1)) + PI / 2;
if (ori2 > PI) ori2 = ori2 - 2 * PI;
// for all points on the line
cv::LineIterator lit(orientations, cv::Point(x1, y1), cv::Point(x2, y2));
int i, count = 0;
for (i = 0, count = 0; i < lit.count; i++, ++lit) {
// *lit points at raw pixel bytes; reading them as float requires
// the orientation image to be CV_32F single-channel
float ori = *(reinterpret_cast<float *>(*lit));
// is line orientation similar to gradient orientation ?
if (std::min(fabs(ori - ori1), fabs(ori - ori2)) < delta)
count++;
}
// fraction of sampled pixels whose orientation matched
double consistency = count / static_cast<double>(i);
// set to zero lines of inconsistent orientation
if (consistency < percentage) {
(*it)[0] = (*it)[1] = (*it)[2] = (*it)[3] = 0;
}
++it;
}
return lines;
}
};
#endif