前言:
最近在学习matlab的图像处理,边缘锐化需要用到的重要工具之一就是矩阵纵向差分,这里分享一下两种风格的代码。
第一种:
基本没用多少库函数,但边界处理稍微有点麻烦
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
// Compute the vertical (row-direction) central difference of an image.
//
// The BGR input is converted to grayscale first. Border pixels are set to
// 0 because the central difference needs both neighbours and is therefore
// undefined on the image frame.
//
// BUG FIX: the original body differenced along x — at(y, x+1) - at(y, x-1),
// i.e. a *horizontal* difference — contradicting both the function name and
// the article's topic (纵向差分 = vertical difference). It now differences
// along y: I(y+1, x) - I(y-1, x).
//
// Returns a CV_8UC1 Mat of the same size as the input.
Mat verticalDiff(const Mat& image)
{
    Mat grayImage;
    cvtColor(image, grayImage, COLOR_BGR2GRAY);

    // Every pixel is written below (either 0 or the clamped difference),
    // so allocate without copying instead of clone()-ing the gray image.
    Mat diffimage(grayImage.size(), CV_8UC1);

    for (int y = 0; y < grayImage.rows; y++)
    {
        for (int x = 0; x < grayImage.cols; x++)
        {
            if (y == 0 || y == grayImage.rows - 1 || x == 0 || x == grayImage.cols - 1)
            {
                // Frame pixels: neighbours missing, define the result as 0.
                diffimage.at<uchar>(y, x) = 0;
            }
            else
            {
                // Central difference along the row (y) axis.
                int diff = grayImage.at<uchar>(y + 1, x) - grayImage.at<uchar>(y - 1, x);
                // Clamp the possibly-negative result into [0, 255].
                diffimage.at<uchar>(y, x) = saturate_cast<uchar>(diff);
            }
        }
    }
    return diffimage;
}
int main()
{
Mat image = imread("C:/Users/86137/Pictures/Screenshots/屏幕截图 2024-03-16 001448.png", IMREAD_COLOR);
if (image.empty())
{
cout << "Failed to read the image!" << endl;
return -1;
}
Mat diffimage = verticalDiff(image);
namedWindow("Original image", WINDOW_NORMAL);
imshow("Original image", image);
namedWindow("Vertical Diff Image", WINDOW_NORMAL);
imshow("Vertical Diff Image", diffimage);
waitKey(0);
return 0;
}
第二种:
(这里用的是双向差分,但是原理一样),用了sobel函数,简单搬运一下sobel的定义:
- Sobel(src, dst, ddepth, dx, dy, ksize, scale, delta, borderType);
  - src: 输入图像,通常为灰度图像。
  - dst: 输出图像,用于保存 Sobel 梯度的结果。
  - ddepth: 输出图像的深度,通常为 CV_16S 或 CV_32F。在梯度计算中,为了避免数据溢出,通常会选择一个比较高的深度,如 CV_16S。
  - dx: x 方向上的导数阶数。
  - dy: y 方向上的导数阶数。
  - ksize: Sobel 核的大小,通常为 1、3、5 或者 7。
  - scale: 梯度放缩因子,通常为 1。
  - delta: 在梯度计算结果上加的偏置,通常为 0。
  - borderType: 边界扩展方式,通常为 BORDER_DEFAULT。

代码里用 Sobel(src, dst, ddepth, dx, dy) 来求 x、y 两个方向的差分。
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
// Compute the gradient magnitude of an image from its Sobel derivatives
// in both directions (a "bidirectional difference").
//
// The BGR input is converted to grayscale; Sobel responses are taken at
// CV_16S depth so the signed, >8-bit intermediate values do not overflow.
// The per-pixel result is sqrt(Gx^2 + Gy^2), saturated into [0, 255].
//
// Returns a CV_8UC1 Mat of the same size as the input.
Mat bidirectionalDiff(const Mat& image)
{
    Mat grayImage;
    cvtColor(image, grayImage, COLOR_BGR2GRAY);

    // Horizontal (d/dx) and vertical (d/dy) Sobel derivatives.
    Mat horizontalDiff;
    Sobel(grayImage, horizontalDiff, CV_16S, 1, 0);
    Mat verticalDiff;
    Sobel(grayImage, verticalDiff, CV_16S, 0, 1);

    // PERF: every pixel is overwritten in the loop below, so allocate the
    // output directly instead of clone()-ing the gray image (the clone
    // copied data that was immediately discarded).
    Mat diffImage(grayImage.size(), CV_8UC1);

    for (int y = 0; y < grayImage.rows; y++)
    {
        for (int x = 0; x < grayImage.cols; x++)
        {
            int diffH = abs(horizontalDiff.at<short>(y, x));
            int diffV = abs(verticalDiff.at<short>(y, x));
            // Euclidean gradient magnitude, clamped to the uchar range.
            // (Max |Sobel| response with the default 3x3 kernel is ~1020,
            // so the squared sum comfortably fits in an int.)
            diffImage.at<uchar>(y, x) = saturate_cast<uchar>(sqrt(diffH * diffH + diffV * diffV));
        }
    }
    return diffImage;
}
int main()
{
Mat image = imread("image.jpg", IMREAD_COLOR);
if (image.empty())
{
cout << "Failed to read the image!" << endl;
return -1;
}
Mat diffImage = bidirectionalDiff(image);
namedWindow("Original image", WINDOW_NORMAL);
imshow("Original image", image);
namedWindow("Bidirectional Diff Image", WINDOW_NORMAL);
imshow("Bidirectional Diff Image", diffImage);
waitKey(0);
return 0;
}
因为处理比较初级,所以划分效果不尽如人意,但是对matlab处理图的原理更清楚了。下面是双向差分的结果