OpenCVSharp 4.5 Image Segmentation (Distance Transform and Watershed Algorithm)

Running the official OpenCV tutorial with OpenCvSharp 4.5.

Original OpenCV tutorial: OpenCV: Image Segmentation with Distance Transform and Watershed Algorithm

Core steps: sharpen the source with a Laplacian kernel, binarize it, run the distance transform, threshold the peaks to obtain the foreground markers, and finally call Cv2.Watershed. Full code:

using System;
using OpenCvSharp;

namespace ConsoleApp1
{
    class tutorial29 : ITutorial
    {
        public void Run()
        {
            // Load the image           
            Mat src = Cv2.ImRead("I:\\csharp\\images\\cards.png");
            if (src.Empty())
            {
                Console.WriteLine("Could not open or find the image!");
                return;
            }
            // Show source image
            Cv2.ImShow("Source Image", src);

            // Change the background from white to black, since that will help later to extract
            // better results during the use of Distance Transform
            for (int i = 0; i < src.Rows; i++)
            {
                for (int j = 0; j < src.Cols; j++)
                {
                    if (src.At<Vec3b>(i, j) == new Vec3b(255, 255, 255))
                    {
                        src.At<Vec3b>(i, j)[0] = 0;
                        src.At<Vec3b>(i, j)[1] = 0;
                        src.At<Vec3b>(i, j)[2] = 0;
                    }
                }
            }
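            // (Note: the same replacement could also be done without the per-pixel loop, e.g. by building
            //  a white-pixel mask with Cv2.InRange and calling src.SetTo(Scalar.All(0), mask).)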
            // Show output image
            Cv2.ImShow("Black Background Image", src);
            // Create a kernel that we will use to sharpen our image
            double[,] k = { { 1.0,  1.0, 1.0 },
                            { 1.0, -8.0, 1.0 },
                            { 1.0,  1.0, 1.0 } };

            Mat kernel = Mat.FromArray(k);
            // an approximation of the second derivative, a quite strong kernel
            // do the Laplacian filtering as it is
            // well, we need to convert everything to something deeper than CV_8U
            // because the kernel has some negative values,
            // and we can expect in general to have a Laplacian image with negative values
            // BUT an 8-bit unsigned int (the one we are working with) can only hold values from 0 to 255,
            // so any negative values would be truncated
            Mat imgLaplacian = new Mat();
            Cv2.Filter2D(src, imgLaplacian, MatType.CV_32F, kernel);
            Mat sharp = new Mat();
            src.ConvertTo(sharp, MatType.CV_32F);
            Mat imgResult = sharp - imgLaplacian;
            // convert back to 8bits gray scale
            imgResult.ConvertTo(imgResult, MatType.CV_8UC3);
            imgLaplacian.ConvertTo(imgLaplacian, MatType.CV_8UC3);
            // Cv2.ImShow("Laplace Filtered Image", imgLaplacian);
            Cv2.ImShow("New Sharped Image", imgResult);
            // Create binary image from source image
            Mat bw = new Mat();
            Cv2.CvtColor(imgResult, bw, ColorConversionCodes.BGR2GRAY);
            Cv2.Threshold(bw, bw, 40, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
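            // Note: because ThresholdTypes.Otsu is set, the fixed value 40 above is ignored and
            // Otsu's method picks the threshold automatically.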
            Cv2.ImShow("Binary Image", bw);
            // Perform the distance transform algorithm
            Mat dist = new Mat();
            Cv2.DistanceTransform(bw, dist, DistanceTypes.L2, DistanceTransformMasks.Mask3);
            // Normalize the distance image for range = {0.0, 1.0}
            // so we can visualize and threshold it
            Cv2.Normalize(dist, dist, 0, 1.0, NormTypes.MinMax);
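            // (Without this normalization, the fixed 0.4 cut below would have to be chosen in absolute distance units.)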
            Cv2.ImShow("Distance Transform Image", dist);
            // Threshold to obtain the peaks
            // This will be the markers for the foreground objects
            Cv2.Threshold(dist, dist, 0.4, 1.0, ThresholdTypes.Binary);
            // Dilate a bit the dist image
            Mat kernel1 = Mat.Ones(new Size(3, 3), MatType.CV_8U);

            Cv2.Dilate(dist, dist, kernel1);
            Cv2.ImShow("Peaks", dist);
            // Create the CV_8U version of the distance image
            // It is needed for findContours()
            Mat dist_8u = new Mat();
            dist.ConvertTo(dist_8u, MatType.CV_8U);
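            // After the 0.4 threshold, dist only holds 0.0/1.0, so dist_8u is a 0/1 mask;
            // FindContours only distinguishes zero from non-zero, so that is enough.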
            // Find total markers
            Point[][] contours;
            HierarchyIndex[] hierarchyIndices;
            Cv2.FindContours(dist_8u, out contours, out hierarchyIndices, RetrievalModes.External, ContourApproximationModes.ApproxSimple);
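            // Each external contour corresponds to one peak blob, i.e. one object to be labeled.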
            // Create the marker image for the watershed algorithm
            Mat markers = Mat.Zeros(dist.Size(), MatType.CV_32S);
            // Draw the foreground markers
            for (int i = 0; i < contours.Length; i++)
            {
                Cv2.DrawContours(markers, contours, i, new Scalar(i + 1), -1);
            }
            // Draw the background marker
            Cv2.Circle(markers, new Point(5, 5), 3, new Scalar(255), -1);
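            // markers now holds 0 (unlabeled), 1..contours.Length (one id per object) and 255 (background seed).
            // The scaled copy below exists only so the small label values become visible in ImShow.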
            Mat markers1 = markers * 10000;
            markers1.ConvertTo(markers1, MatType.CV_16S);
            Cv2.ImShow("Markers", markers1);
            // Perform the watershed algorithm
            Cv2.Watershed(imgResult, markers);
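            // After Watershed, boundary pixels are set to -1; every other pixel keeps the label of the
            // region it was assigned to.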
            Mat mark = new Mat();
            markers.ConvertTo(mark, MatType.CV_8U);
            Cv2.BitwiseNot(mark, mark);
            // Cv2.ImShow("Markers_v2", mark); // uncomment to see what the mark image looks like at this point
            // Generate random colors
            Vec3b[] colors = new Vec3b[contours.Length];
            RNG rng = new RNG((ulong)DateTime.Now.Ticks);
            for (int i = 0; i < contours.Length; i++)
            {
                colors[i] = new Vec3b((byte)rng.Uniform(0, 256), (byte)rng.Uniform(0, 256), (byte)rng.Uniform(0, 256));
            }
            // Create the result image
            Mat dst = Mat.Zeros(markers.Size(), MatType.CV_8UC3);
            // Fill labeled objects with random colors
            for (int i = 0; i < markers.Rows; i++)
            {
                for (int j = 0; j < markers.Cols; j++)
                {
                    int index = markers.At<int>(i, j);
                    if (index > 0 && index <= contours.Length)
                    {
                        dst.At<Vec3b>(i, j) = colors[index - 1];
                    }
                }
            }
            // Visualize the final image
            Cv2.ImShow("Final Result", dst);
            Cv2.WaitKey();
        }
    }
}
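
The listing implements an ITutorial interface that the article never shows. A minimal sketch of what that interface and a console entry point might look like, assuming the interface only declares Run() (the Program class here is illustrative, not part of the original project):

```csharp
namespace ConsoleApp1
{
    // Assumed shape of the interface referenced by tutorial29: a single parameterless Run() method.
    interface ITutorial
    {
        void Run();
    }

    class Program
    {
        static void Main()
        {
            // Adjust the hard-coded image path inside tutorial29.Run() to a local file before running.
            ITutorial tutorial = new tutorial29();
            tutorial.Run();
        }
    }
}
```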

For reference, here is the same pipeline in Python. Watershed-based segmentation is a common image-processing technique that splits an image into regions whose pixels share similar characteristics; in OpenCV's Python bindings it is exposed through the cv2.watershed() function. A short example:

```python
import cv2
import numpy as np

# Read the image
img = cv2.imread('image.jpg')

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Threshold (inverted binary + Otsu)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# Morphological opening to remove noise
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

# Distance transform and sure-foreground threshold
dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.1 * dist_transform.max(), 255, 0)

# Sure background region
sure_bg = cv2.dilate(opening, kernel, iterations=3)

# Unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)

# Label connected components to build the markers
ret, markers = cv2.connectedComponents(sure_fg)
markers = markers + 1
markers[unknown == 255] = 0

# Apply the watershed algorithm
markers = cv2.watershed(img, markers)
img[markers == -1] = [255, 0, 0]

# Show the result
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
```

In this example, the image is read and converted to grayscale, then binarized with Otsu thresholding. Morphological opening removes noise, the distance transform highlights the interior of the foreground objects and is thresholded to obtain the sure-foreground region, and dilating the opened image gives the sure-background region. Subtracting the foreground from the background yields the unknown region, cv2.connectedComponents() labels the foreground seeds, and the marker image built from those labels is passed to cv2.watershed(); boundary pixels (label -1) are painted blue in the displayed result. Note that the watershed result depends heavily on the quality of the input markers, so the thresholding, morphology, and distance-transform parameters usually need tuning for a given image.
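
For comparison with the FindContours-based marker generation in the C# listing above, here is a minimal OpenCvSharp sketch of the ConnectedComponents-based marker construction used in the Python example. The helper name, the 0.4 peak threshold, and the 3x3 kernel are illustrative assumptions, not taken from the original article:

```csharp
using OpenCvSharp;

static class WatershedMarkers
{
    // Build a CV_32S marker image for Cv2.Watershed from a CV_8U binary foreground mask `bw`.
    public static Mat FromBinaryMask(Mat bw)
    {
        // Distance transform and peak extraction, as in the listing above
        Mat dist = new Mat();
        Cv2.DistanceTransform(bw, dist, DistanceTypes.L2, DistanceTransformMasks.Mask3);
        Cv2.Normalize(dist, dist, 0, 1.0, NormTypes.MinMax);
        Cv2.Threshold(dist, dist, 0.4, 1.0, ThresholdTypes.Binary);

        // Sure foreground: the thresholded peaks as an 8-bit 0/255 mask
        Mat sureFg = new Mat();
        dist.ConvertTo(sureFg, MatType.CV_8U, 255);

        // Sure background: a dilated version of the original mask
        Mat kernel = Mat.Ones(3, 3, MatType.CV_8U);
        Mat sureBg = new Mat();
        Cv2.Dilate(bw, sureBg, kernel);

        // Unknown region: background minus foreground
        Mat unknown = new Mat();
        Cv2.Subtract(sureBg, sureFg, unknown);

        // Label each peak blob with its own integer id (0 = background of sureFg)
        Mat markers = new Mat();
        Cv2.ConnectedComponents(sureFg, markers, PixelConnectivity.Connectivity8);

        // Shift all labels up by one so the sure background becomes 1, then mark the unknown region as 0
        markers = markers + 1;
        markers.SetTo(Scalar.All(0), unknown);

        return markers; // pass to Cv2.Watershed(srcBgr, markers)
    }
}
```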
