Canny OpenCV (C++) implementation


I. Results

1. Original image

(image: original input photo)

2. Final result

(image: edge map produced by this implementation)

II. Implementation

1. Grayscale conversion
    // convert to grayscale (imread loads BGR, so use COLOR_BGR2GRAY)
    Mat gray_image;
    cvtColor(row_image, gray_image, COLOR_BGR2GRAY);
    imshow("gray image", gray_image);
2. Gaussian smoothing
    // Gaussian blur: 5x5 kernel, sigma = 1
    int kernel_size = 5;
    double sigma = 1;
    Mat gaus_image;
    GaussianBlur(gray_image, gaus_image, Size(kernel_size, kernel_size), sigma);
    imshow("gaus image", gaus_image);
3. Sobel gradients in X and Y and the gradient direction angle

The widely used Sobel operator is applied here to compute the gradients in the X and Y directions and the gradient direction angle. The X- and Y-direction convolution kernels of the Sobel operator are:

    Kx = [  1   2   1 ]        Ky = [ -1   0   1 ]
         [  0   0   0 ]             [ -2   0   2 ]
         [ -1  -2  -1 ]             [ -1   0   1 ]

(Kx takes the difference between the rows above and below the pixel, Ky between the columns to its right and left; these are the kernels applied by sobelGradDirection below.)

/*
Compute the gradient magnitude and direction
@param: imageSource     input grayscale image
@param: imageX          gradient image in the X direction
@param: imageY          gradient image in the Y direction
@param: gradXY          gradient magnitude at each pixel
@param: pointDirection  gradient direction angle at each pixel
*/
void sobelGradDirection(const Mat imageSource, Mat &imageX, Mat &imageY, Mat &gradXY, Mat &pointDirection) {
    imageX = Mat::zeros(imageSource.size(), CV_32SC1);
    imageY = Mat::zeros(imageSource.size(), CV_32SC1);
    gradXY = Mat::zeros(imageSource.size(), CV_32SC1);
    pointDirection = Mat::zeros(imageSource.size(), CV_32SC1);

    int rows = imageSource.rows;
    int cols = imageSource.cols;

    int step = imageSource.step;
    /*
    Mat::step is the number of bytes actually occupied by one image row
    (OpenCV may pad rows for alignment), so it is used as the row stride below.
    Raw pointer access is used because it is faster than the at<>() accessor.
    */
    uchar *P = imageSource.data;
    for (int i = 1; i < rows - 1; i++) {
        for (int j = 1; j < cols - 1; j++) {
            int a00 = P[(i - 1) * step + j - 1];
            int a01 = P[(i - 1) * step + j];
            int a02 = P[(i - 1) * step + j + 1];

            int a10 = P[i * step + j - 1];
            int a11 = P[i * step + j];
            int a12 = P[i * step + j + 1];

            int a20 = P[(i + 1) * step + j - 1];
            int a21 = P[(i + 1) * step + j];
            int a22 = P[(i + 1) * step + j + 1];

            double gradY = double(a02 + 2 * a12 + a22 - a00 - 2 * a10 - a20);
            double gradX = double(a00 + 2 * a01 + a02 - a20 - 2 * a21 - a22);

            imageX.at<int>(i, j) = abs(gradX);
            imageY.at<int>(i, j) = abs(gradY);
            if (gradX == 0) gradX = 0.000000000001;  // avoid division by zero
            // atan returns an angle in (-pi/2, pi/2); convert it to degrees and map
            // negative angles up by 360 so that all directions lie in [0, 90] U [270, 360)
            pointDirection.at<int>(i, j) = atan(gradY / gradX) * 57.3;
            pointDirection.at<int>(i, j) = (pointDirection.at<int>(i, j) + 360) % 360;
            // combined gradient magnitude from the X and Y components
            gradXY.at<int>(i, j) = sqrt(gradX * gradX + gradY * gradY);
        }
    }
    // convert to 8-bit absolute values (CV_8U) so the images can be displayed
    convertScaleAbs(imageX, imageX);
    convertScaleAbs(imageY, imageY);
    convertScaleAbs(gradXY, gradXY);

}
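As a quick cross-check of sobelGradDirection, the same quantities can be produced with OpenCV's built-in functions. The following is only a sketch for comparison, not part of the original pipeline; it assumes the blurred image gaus_image from step 2, and note that cv::Sobel's dx differentiates along the columns, so its X/Y naming may be swapped relative to the hand-written function above.

    // sketch: gradients, magnitude and direction via OpenCV built-ins
    Mat gx, gy, mag, ang;
    Sobel(gaus_image, gx, CV_32F, 1, 0, 3);   // derivative along the columns (OpenCV's x)
    Sobel(gaus_image, gy, CV_32F, 0, 1, 3);   // derivative along the rows (OpenCV's y)
    cartToPolar(gx, gy, mag, ang, true);      // magnitude and angle in degrees [0, 360)
    Mat magU8;
    convertScaleAbs(mag, magU8);              // 8-bit magnitude for display
    imshow("sobel magnitude (built-in)", magU8);

The magU8 and ang images produced here are reused by the quantized non-maximum-suppression sketch in the next step.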
4. Non-maximum suppression

Purpose: applying non-maximum suppression to the gradient-magnitude image further removes non-edge noise and, more importantly, thins the edges.

Suppression rule: along the gradient direction at each pixel, compare its magnitude with the two points in front of and behind it; if the pixel is greater than both it is kept, and if it is smaller than either of them it is set to 0.

(figure: the four possible gradient directions through a pixel and the two sub-pixel crossing points on the neighbourhood boundary that are compared against the centre pixel)

The four dashed lines in the figure stand for the possible gradient directions at a pixel. The two points where the gradient direction crosses the boundary of the neighbourhood are the ones compared against the centre pixel (X0, Y0). Since these crossing points fall between pixels, their magnitudes are obtained by linear interpolation. Taking the gradient angle Θ marked by the yellow dashed line as an example, the magnitude at the crossing point is
P(X0, Y0) + (P(X0-1, Y0+1) - P(X0, Y0)) * tan(Θ),
i.e. a linear interpolation with weight tan(Θ); this weight corresponds to the `weight` variable (folded into [0, 1]) in the code below.

/*
Local non-maximum suppression
Along the gradient direction at each pixel, compare its magnitude with the two
interpolated neighbours; if the pixel is smaller than either of them it is set to 0,
otherwise it is kept.
@param: imageInput   input gradient-magnitude image
@param: imageOutput  output image after non-maximum suppression
@param: theta        gradient direction angle of each pixel
@param: imageX       gradient in the X direction
@param: imageY       gradient in the Y direction
*/
void localNonMaxValueSuppression(const Mat &imageInput, Mat &imageOutput, const Mat &theta, const Mat &imageX,
                                 const Mat &imageY) {
    imageOutput = imageInput.clone();

    int cols = imageInput.cols;
    int rows = imageInput.rows;

    for (int i = 1; i < rows - 1; i++) {
        for (int j = 1; j < cols - 1; j++) {
            if (0 == imageInput.at<uchar>(i, j))continue;
            // the 3x3 neighbourhood around (i, j)
            int g00 = imageInput.at<uchar>(i - 1, j - 1);
            int g01 = imageInput.at<uchar>(i - 1, j);
            int g02 = imageInput.at<uchar>(i - 1, j + 1);

            int g10 = imageInput.at<uchar>(i, j - 1);
            int g11 = imageInput.at<uchar>(i, j);
            int g12 = imageInput.at<uchar>(i, j + 1);

            int g20 = imageInput.at<uchar>(i + 1, j - 1);
            int g21 = imageInput.at<uchar>(i + 1, j);
            int g22 = imageInput.at<uchar>(i + 1, j + 1);

            int direction = theta.at<int>(i, j); // gradient direction angle at this pixel
            double tmp1 = 0; // magnitudes interpolated at the two sub-pixel crossing points
            double tmp2 = 0;
            double weight = fabs((double) imageY.at<uchar>(i, j) / (double) imageX.at<uchar>(i, j));

            if (weight == 0) weight = 0.0000001; // keep the interpolation weight strictly positive
            if (weight > 1) {
                weight = 1 / weight; // fold the weight into [0, 1]
            }

            if ((0 <= direction && direction < 45) || (180 <= direction && direction < 225)) {
                tmp1 = g10 * (1 - weight) + g20 * (weight);
                tmp2 = g02 * (weight) + g12 * (1 - weight);
            }

            if ((45 <= direction && direction < 90) || (225 <= direction && direction < 270)) {
                tmp1 = g01 * (1 - weight) + g02 * (weight);
                tmp2 = g20 * (weight) + g21 * (1 - weight);
            }

            if ((90 <= direction && direction < 135) || (270 <= direction && direction < 315)) {
                tmp1 = g00 * (weight) + g01 * (1 - weight);
                tmp2 = g21 * (1 - weight) + g22 * (weight);
            }

            if ((135 <= direction && direction < 180) || (315 <= direction && direction < 360)) {
                tmp1 = g00 * (weight) + g10 * (1 - weight);
                tmp2 = g12 * (1 - weight) + g22 * (weight);
            }
            // if the pixel is smaller than either interpolated neighbour, suppress it
            if (imageInput.at<uchar>(i, j) < tmp1 || imageInput.at<uchar>(i, j) < tmp2)
                imageOutput.at<uchar>(i, j) = 0;
        }
    }

}
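For comparison with the interpolated version above, here is a simpler sketch that quantizes the gradient direction into four sectors instead of interpolating sub-pixel neighbours. It is not the original code: the name nmsQuantized is made up, and it works on the magU8 and ang images from the cv::Sobel sketch in step 3 rather than on theta from sobelGradDirection, because the two follow different X/Y conventions.

// sketch: non-maximum suppression with the direction quantized into four sectors
void nmsQuantized(const Mat &magU8, const Mat &ang, Mat &out) {
    out = magU8.clone();
    for (int i = 1; i < magU8.rows - 1; i++) {
        for (int j = 1; j < magU8.cols - 1; j++) {
            if (magU8.at<uchar>(i, j) == 0) continue;
            float d = fmodf(ang.at<float>(i, j), 180.0f);  // fold the angle into [0, 180)
            int n1, n2;  // the two neighbours along the quantized gradient direction
            if (d < 22.5f || d >= 157.5f) {      // ~0 deg: gradient along the columns, compare left/right
                n1 = magU8.at<uchar>(i, j - 1); n2 = magU8.at<uchar>(i, j + 1);
            } else if (d < 67.5f) {              // ~45 deg: gradient towards down-right / up-left
                n1 = magU8.at<uchar>(i - 1, j - 1); n2 = magU8.at<uchar>(i + 1, j + 1);
            } else if (d < 112.5f) {             // ~90 deg: gradient along the rows, compare up/down
                n1 = magU8.at<uchar>(i - 1, j); n2 = magU8.at<uchar>(i + 1, j);
            } else {                             // ~135 deg: gradient towards down-left / up-right
                n1 = magU8.at<uchar>(i - 1, j + 1); n2 = magU8.at<uchar>(i + 1, j - 1);
            }
            if (magU8.at<uchar>(i, j) < n1 || magU8.at<uchar>(i, j) < n2)
                out.at<uchar>(i, j) = 0;
        }
    }
}

The quantized variant produces slightly coarser edges than the interpolated one, but it avoids the sub-pixel weighting entirely.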
5. Double thresholding

Choose a low threshold and a high threshold. A common rule of thumb is to take the high threshold near the 70% point of the image's overall gray-level distribution, with the high threshold about 1.5 to 2 times the low threshold.

Pixels whose gray value is above the high threshold are set to 255; pixels below the low threshold are set to 0; the remaining pixels are left unchanged for the linking step.

void doubleThreshold(Mat imageInput, const double lowThreshold, const double highThreshold, Mat& imageOut) {
    imageOut = imageInput.clone();
    int cols = imageInput.cols;
    int rows = imageInput.rows;
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            double temp = imageOut.at<uchar>(i, j);
            temp = temp > highThreshold ? (255) : (temp);
            temp = temp < lowThreshold ? (0) : (temp);
            imageOut.at<uchar>(i, j) = temp;
        }
    }
}
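The "70% of the gray-level distribution" rule of thumb above can also be derived from a histogram instead of being tuned by hand. The sketch below is only one possible reading of that rule and is not from the original post (the name pickThresholds is hypothetical): it accumulates the histogram of the non-zero gradient magnitudes until about 70% of them are covered, and takes half of that value as the low threshold.

// sketch: derive the double thresholds from the gradient-magnitude histogram
static void pickThresholds(const Mat &grad, double &lowTh, double &highTh) {
    CV_Assert(grad.type() == CV_8UC1);
    int hist[256] = {0};
    int nonZero = 0;
    for (int i = 0; i < grad.rows; i++)
        for (int j = 0; j < grad.cols; j++) {
            uchar v = grad.at<uchar>(i, j);
            if (v > 0) { hist[v]++; nonZero++; }
        }
    int count = 0, level = 0;
    while (level < 255 && count < 0.7 * nonZero)  // accumulate ~70% of the non-zero pixels
        count += hist[++level];
    highTh = level;
    lowTh = highTh / 2.0;  // high threshold roughly 2x the low one
}

It could be called as pickThresholds(nonMaxSuppressionImage, low, high) before doubleThreshold.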

6. Edge linking with the double thresholds

For pixels whose gray value lies between the low and high thresholds, examine the 8 neighbouring pixels. If none of them is 255, the pixel is an isolated local maximum and is discarded (set to 0). If at least one neighbour is 255, the pixel is connected to an existing edge and is set to 255. This step is repeated until every pixel has been examined.

void doubleThresholdLink(Mat imageInput, double lowTh, double highTh, Mat& imageOut) {
    // imageOut is expected to already hold the result of doubleThreshold()
    int cols = imageInput.cols;
    int rows = imageInput.rows;
    // visit every pixel (image borders excluded)
    for (int i = 1; i < rows - 1; i++) {
        for (int j = 1; j < cols - 1; j++) {
            double pix = imageOut.at<uchar>(i, j);
            if (pix != 255)continue;
            bool change = false;
            // check the 8 neighbours; promote any value between the thresholds to 255
            for (int k = -1; k <= 1; k++) {
                for (int u = -1; u <= 1; u++) {
                    // skip the centre pixel itself
                    if (k == 0 && u == 0)continue;
                    double temp = imageOut.at<uchar>(i + k, j + u);
                    if (temp >= lowTh && temp <= highTh) {
                        imageOut.at<uchar>(i + k, j + u) = 255;
                        change = true;
                    }
                }
            }
            if (change) { // a neighbour was promoted: step back and re-scan around it
                if (i > 1)i--;
                if (j > 2)j -= 2;
            }
        }
    }
    // finally, clear every pixel that was never promoted to 255
    for (int i = 0; i < rows; i++)
        for (int j = 0; j < cols; j++)
            if (imageOut.at<uchar>(i, j) != 255)
                imageOut.at<uchar>(i, j) = 0;

}
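The function above revisits newly promoted pixels by stepping the i and j indices backwards. An equivalent formulation that is often easier to reason about uses an explicit queue and flood-fills outward from the strong pixels. This is a sketch under the same assumptions (the image already holds the result of doubleThreshold), not the original author's code:

#include <queue>

// sketch: queue-based hysteresis linking (breadth-first flood fill from strong pixels)
void hysteresisBFS(Mat &edge, double lowTh, double highTh) {
    std::queue<Point> q;
    // seed the search with every strong pixel
    for (int i = 0; i < edge.rows; i++)
        for (int j = 0; j < edge.cols; j++)
            if (edge.at<uchar>(i, j) == 255) q.push(Point(j, i));
    // promote any 8-connected weak pixel to 255 and keep expanding from it
    while (!q.empty()) {
        Point p = q.front(); q.pop();
        for (int k = -1; k <= 1; k++) {
            for (int u = -1; u <= 1; u++) {
                int y = p.y + k, x = p.x + u;
                if (y < 0 || y >= edge.rows || x < 0 || x >= edge.cols) continue;
                uchar &v = edge.at<uchar>(y, x);
                if (v != 255 && v >= lowTh && v <= highTh) {
                    v = 255;
                    q.push(Point(x, y));
                }
            }
        }
    }
    // everything that was never promoted is not an edge
    for (int i = 0; i < edge.rows; i++)
        for (int j = 0; j < edge.cols; j++)
            if (edge.at<uchar>(i, j) != 255) edge.at<uchar>(i, j) = 0;
}

Because every pixel enters the queue at most once, this version finishes in a single pass without any index back-tracking.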

Complete code

#include <iostream>
#include "opencv2/opencv.hpp"

using namespace std;
using namespace cv;

const int threshold_slider_max = 255;
int low_threshold = 60, high_threshold = 180;

/*
Compute the gradient magnitude and direction
@param: imageSource     input grayscale image
@param: imageX          gradient image in the X direction
@param: imageY          gradient image in the Y direction
@param: gradXY          gradient magnitude at each pixel
@param: pointDirection  gradient direction angle at each pixel
*/
void sobelGradDirection(const Mat imageSource, Mat &imageX, Mat &imageY, Mat &gradXY, Mat &pointDirection) {
    imageX = Mat::zeros(imageSource.size(), CV_32SC1);
    imageY = Mat::zeros(imageSource.size(), CV_32SC1);
    gradXY = Mat::zeros(imageSource.size(), CV_32SC1);
    pointDirection = Mat::zeros(imageSource.size(), CV_32SC1);

    int rows = imageSource.rows;
    int cols = imageSource.cols;

    int step = imageSource.step;
    /*
    Mat::step is the number of bytes actually occupied by one image row
    (OpenCV may pad rows for alignment), so it is used as the row stride below.
    Raw pointer access is used because it is faster than the at<>() accessor.
    */
    uchar *P = imageSource.data;
    for (int i = 1; i < rows - 1; i++) {
        for (int j = 1; j < cols - 1; j++) {
            int a00 = P[(i - 1) * step + j - 1];
            int a01 = P[(i - 1) * step + j];
            int a02 = P[(i - 1) * step + j + 1];

            int a10 = P[i * step + j - 1];
            int a11 = P[i * step + j];
            int a12 = P[i * step + j + 1];

            int a20 = P[(i + 1) * step + j - 1];
            int a21 = P[(i + 1) * step + j];
            int a22 = P[(i + 1) * step + j + 1];

            double gradY = double(a02 + 2 * a12 + a22 - a00 - 2 * a10 - a20);
            double gradX = double(a00 + 2 * a01 + a02 - a20 - 2 * a21 - a22);

            imageX.at<int>(i, j) = abs(gradX);
            imageY.at<int>(i, j) = abs(gradY);
            if (gradX == 0) gradX = 0.000000000001;  // avoid division by zero
            // atan returns an angle in (-pi/2, pi/2); convert it to degrees and map
            // negative angles up by 360 so that all directions lie in [0, 90] U [270, 360)
            pointDirection.at<int>(i, j) = atan(gradY / gradX) * 57.3;
            pointDirection.at<int>(i, j) = (pointDirection.at<int>(i, j) + 360) % 360;
            // combined gradient magnitude from the X and Y components
            gradXY.at<int>(i, j) = sqrt(gradX * gradX + gradY * gradY);
        }
    }
    // convert to 8-bit absolute values (CV_8U) so the images can be displayed
    convertScaleAbs(imageX, imageX);
    convertScaleAbs(imageY, imageY);
    convertScaleAbs(gradXY, gradXY);

}

/*
Local non-maximum suppression
Along the gradient direction at each pixel, compare its magnitude with the two
interpolated neighbours; if the pixel is smaller than either of them it is set to 0,
otherwise it is kept.
@param: imageInput   input gradient-magnitude image
@param: imageOutput  output image after non-maximum suppression
@param: theta        gradient direction angle of each pixel
@param: imageX       gradient in the X direction
@param: imageY       gradient in the Y direction
*/
void localNonMaxValueSuppression(const Mat &imageInput, Mat &imageOutput, const Mat &theta, const Mat &imageX,
                                 const Mat &imageY) {
    imageOutput = imageInput.clone();

    int cols = imageInput.cols;
    int rows = imageInput.rows;

    for (int i = 1; i < rows - 1; i++) {
        for (int j = 1; j < cols - 1; j++) {
            if (0 == imageInput.at<uchar>(i, j))continue;
            // the 3x3 neighbourhood around (i, j)
            int g00 = imageInput.at<uchar>(i - 1, j - 1);
            int g01 = imageInput.at<uchar>(i - 1, j);
            int g02 = imageInput.at<uchar>(i - 1, j + 1);

            int g10 = imageInput.at<uchar>(i, j - 1);
            int g11 = imageInput.at<uchar>(i, j);
            int g12 = imageInput.at<uchar>(i, j + 1);

            int g20 = imageInput.at<uchar>(i + 1, j - 1);
            int g21 = imageInput.at<uchar>(i + 1, j);
            int g22 = imageInput.at<uchar>(i + 1, j + 1);

            int direction = theta.at<int>(i, j); // gradient direction angle at this pixel
            double tmp1 = 0; // magnitudes interpolated at the two sub-pixel crossing points
            double tmp2 = 0;
            double weight = fabs((double) imageY.at<uchar>(i, j) / (double) imageX.at<uchar>(i, j));

            if (weight == 0) weight = 0.0000001; // keep the interpolation weight strictly positive
            if (weight > 1) {
                weight = 1 / weight; // fold the weight into [0, 1]
            }

            if ((0 <= direction && direction < 45) || (180 <= direction && direction < 225)) {
                tmp1 = g10 * (1 - weight) + g20 * (weight);
                tmp2 = g02 * (weight) + g12 * (1 - weight);
            }

            if ((45 <= direction && direction < 90) || (225 <= direction && direction < 270)) {
                tmp1 = g01 * (1 - weight) + g02 * (weight);
                tmp2 = g20 * (weight) + g21 * (1 - weight);
            }

            if ((90 <= direction && direction < 135) || (270 <= direction && direction < 315)) {
                tmp1 = g00 * (weight) + g01 * (1 - weight);
                tmp2 = g21 * (1 - weight) + g22 * (weight);
            }

            if ((135 <= direction && direction < 180) || (315 <= direction && direction < 360)) {
                tmp1 = g00 * (weight) + g10 * (1 - weight);
                tmp2 = g12 * (1 - weight) + g22 * (weight);
            }
            // if the pixel is smaller than either interpolated neighbour, suppress it
            if (imageInput.at<uchar>(i, j) < tmp1 || imageInput.at<uchar>(i, j) < tmp2)
                imageOutput.at<uchar>(i, j) = 0;
        }
    }

}

/*
Double thresholding:
Choose a low threshold A and a high threshold B. B is typically taken near the 70% point
of the image's overall gray-level distribution and is about 1.5 to 2 times A.
Pixels below A are set to 0; pixels above B are set to 255; the rest are left unchanged.
*/
void doubleThreshold(Mat imageInput, const double lowThreshold, const double highThreshold, Mat& imageOut) {
    imageOut = imageInput.clone();
    int cols = imageInput.cols;
    int rows = imageInput.rows;
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            double temp = imageOut.at<uchar>(i, j);
            temp = temp > highThreshold ? (255) : (temp);
            temp = temp < lowThreshold ? (0) : (temp);
            imageOut.at<uchar>(i, j) = temp;
        }
    }
}

/*
Edge linking:
For pixels whose gray value lies between A and B, examine the 8 neighbouring pixels.
If none of them is 255, the pixel is an isolated local maximum and is set to 0;
if at least one neighbour is 255, the pixel is connected to an existing edge and is set to 255.
This step is repeated until every pixel has been examined.

The neighbourhood-tracking part starts from pixels whose value is 255, promotes any
qualifying neighbour to 255, then steps the i, j indices back so that the search continues
around the newly promoted pixels. Once no more pixels can be promoted, every pixel
that is not 255 is cleared to 0.

@param: imageInput  input gradient image
@param: lowTh       low threshold
@param: highTh      high threshold
@param: imageOut    output image (must already hold the result of doubleThreshold)
*/
void doubleThresholdLink(Mat imageInput, double lowTh, double highTh, Mat& imageOut) {
    // imageOut is expected to already hold the result of doubleThreshold()
    int cols = imageInput.cols;
    int rows = imageInput.rows;
    // visit every pixel (image borders excluded)
    for (int i = 1; i < rows - 1; i++) {
        for (int j = 1; j < cols - 1; j++) {
            double pix = imageOut.at<uchar>(i, j);
            if (pix != 255)continue;
            bool change = false;
            // check the 8 neighbours; promote any value between the thresholds to 255
            for (int k = -1; k <= 1; k++) {
                for (int u = -1; u <= 1; u++) {
                    // skip the centre pixel itself
                    if (k == 0 && u == 0)continue;
                    double temp = imageOut.at<uchar>(i + k, j + u);
                    if (temp >= lowTh && temp <= highTh) {
                        imageOut.at<uchar>(i + k, j + u) = 255;
                        change = true;
                    }
                }
            }
            if (change) { // a neighbour was promoted: step back and re-scan around it
                if (i > 1)i--;
                if (j > 2)j -= 2;
            }
        }
    }
    // finally, clear every pixel that was never promoted to 255
    for (int i = 0; i < rows; i++)
        for (int j = 0; j < cols; j++)
            if (imageOut.at<uchar>(i, j) != 255)
                imageOut.at<uchar>(i, j) = 0;

}

Mat nonMaxSuppressionImage;

static void on_trackbar(int, void *) {
    // double thresholding followed by edge linking
    Mat canny_image;
    doubleThreshold(nonMaxSuppressionImage, low_threshold, high_threshold, canny_image);
    doubleThresholdLink(nonMaxSuppressionImage, low_threshold, high_threshold, canny_image);
    imshow("canny image", canny_image);
}

int main() {
    Mat row_image = imread("/home/joshua/Projects/C++/CV/canny/2.jpg");
    imshow("row image", row_image);

    // convert to grayscale (imread loads BGR, so use COLOR_BGR2GRAY)
    Mat gray_image;
    cvtColor(row_image, gray_image, COLOR_BGR2GRAY);
    imshow("gray image", gray_image);

    // Gaussian blur: 5x5 kernel, sigma = 1
    int kernel_size = 5;
    double sigma = 1;
    Mat gaus_image;
    GaussianBlur(gray_image, gaus_image, Size(kernel_size, kernel_size), sigma);
    imshow("gaus image", gaus_image);

    // compute the X/Y gradients, the magnitude and the direction angle
    Mat imageX, imageY, imageXY;
    Mat theta;
    sobelGradDirection(gaus_image, imageX, imageY, imageXY, theta);
    imshow("X grad direction", imageX);
    imshow("Y grad direction", imageY);
    imshow("XY grad directions", imageXY);

    // non-maximum suppression of the gradient magnitude
    localNonMaxValueSuppression(imageXY, nonMaxSuppressionImage, theta, imageX, imageY);
    imshow("Non-maximum suppression image", nonMaxSuppressionImage);

    char lowThresholdTrackbar[50], highThresholdTrackbar[50];
    namedWindow("canny image", WINDOW_AUTOSIZE); // create the result window
    sprintf(lowThresholdTrackbar, "low threshold (max %d)", threshold_slider_max);
    sprintf(highThresholdTrackbar, "high threshold (max %d)", threshold_slider_max);
    createTrackbar(lowThresholdTrackbar, "canny image", &low_threshold, threshold_slider_max, on_trackbar);
    createTrackbar(highThresholdTrackbar, "canny image", &high_threshold, threshold_slider_max, on_trackbar);
    on_trackbar(0, 0); // render once with the initial thresholds

    // reference result from OpenCV's built-in Canny for comparison
    Mat openCVcannyMat;
    Canny(row_image, openCVcannyMat, 60, 100);
    imshow("opencv canny image", openCVcannyMat);

    waitKey(0);
    return 0;
}

References

https://blog.csdn.net/u010551600/article/details/80739610

https://blog.csdn.net/dcrmg/article/details/52344902

https://blog.csdn.net/dcrmg/article/details/52280768
