OpenCV Series by Teacher Jia, Part 14: Image Segmentation 2 (Watershed Segmentation, GrabCut)

Distance-transform-based watershed segmentation

Separate the coins and pills from the background and count them.

#include "pch.h"
#include <iostream>
#include<opencv2/opencv.hpp>
#include"opencv2/highgui/highgui.hpp"
#include"opencv2/imgproc/imgproc.hpp"

using namespace cv;
using namespace std;

int main(int argc, char** argv) {
	Mat src = imread("D:/opencv/图像分割/代码与图片/coins_001.jpg");
	if (src.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	namedWindow("input image", CV_WINDOW_NORMAL);
	imshow("input image", src);

	Mat gray, binary, shifted;
	pyrMeanShiftFiltering(src, shifted, 21, 51); // edge-preserving mean-shift smoothing
	//imshow("shifted", shifted);

	cvtColor(shifted, gray, COLOR_BGR2GRAY);
	threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
	//imshow("binary", binary);

	// distance transform
	Mat dist;
	distanceTransform(binary, dist, DistanceTypes::DIST_L2, 3, CV_32F);
	// normalize the distance map to [0, 1]
	normalize(dist, dist, 0, 1, NORM_MINMAX);
	//imshow("distance result", dist);

	// binarize: keep only strong distance peaks (values in the 0.4-1 range)
	threshold(dist, dist, 0.4, 1, THRESH_BINARY);
	//imshow("distance binary", dist);

	// markers: convertTo changes the matrix element type
	Mat dist_m;
	dist.convertTo(dist_m, CV_8U);
	vector<vector<Point>> contours;
	// find only the outermost contours
	findContours(dist_m, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));

	// create markers
	Mat markers = Mat::zeros(src.size(), CV_32SC1);
	for (size_t t = 0; t < contours.size(); t++) {
		drawContours(markers, contours, static_cast<int>(t), Scalar::all(static_cast<int>(t) + 1), -1);
	}
	circle(markers, Point(5, 5), 3, Scalar(255), -1); // seed a background marker near the top-left corner
	//imshow("markers", markers*10000);

	// morphological erosion on the color image to suppress noise before watershed
	Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	morphologyEx(src, src, MORPH_ERODE, k);

	// run the watershed transform
	watershed(src, markers);
	Mat mark = Mat::zeros(markers.size(), CV_8UC1);
	markers.convertTo(mark, CV_8UC1);
	bitwise_not(mark, mark, Mat());
	//imshow("watershed result", mark);

	// generate a random color for each marker
	vector<Vec3b> colors;
	for (size_t i = 0; i < contours.size(); i++) {
		int r = theRNG().uniform(0, 255);
		int g = theRNG().uniform(0, 255);
		int b = theRNG().uniform(0, 255);
		colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
	}

	// fill each segment with its color and show the result
	Mat dst = Mat::zeros(markers.size(), CV_8UC3);
	int index = 0;
	for (int row = 0; row < markers.rows; row++) {
		for (int col = 0; col < markers.cols; col++) {
			index = markers.at<int>(row, col);
			if (index > 0 && index <= static_cast<int>(contours.size())) {
				dst.at<Vec3b>(row, col) = colors[index - 1];
			}
			else {
				dst.at<Vec3b>(row, col) = Vec3b(0, 0, 0);
			}
		}
	}
	namedWindow("output image", CV_WINDOW_NORMAL);
	imshow("output image", dst);
	printf("number of objects : %d\n", contours.size());
	waitKey(0);
	return 0;
}
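
A quicker way to build the markers, shown below as a minimal sketch (an alternative to the findContours/drawContours loop above, assuming dist is the thresholded distance map from the listing): connectedComponents() labels each blob with 1, 2, 3, ... directly.

Mat dist8u, markers2;
dist.convertTo(dist8u, CV_8U, 255);                        // 0/1 float -> 0/255 bytes
int nLabels = connectedComponents(dist8u, markers2, 8, CV_32S);
// label 0 is the untouched background; labels 1..nLabels-1 become the seeds passed to watershed()
// (a separate background seed, like the corner circle above, can still be drawn in afterwards)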

Separating the portrait from an ID photo

#include "pch.h"
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

Mat watershedCluster(Mat &image, int &numSegments);
void createDisplaySegments(Mat &segments, int numSegments, Mat &image);
int main(int argc, char** argv) {
	Mat src = imread("D:/opencv/图像分割/代码与图片/cvtest.png");
	if (src.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	namedWindow("input image", CV_WINDOW_AUTOSIZE);
	imshow("input image", src);

	int numSegments;
	Mat markers = watershedCluster(src, numSegments);
	createDisplaySegments(markers, numSegments, src);
	waitKey(0);
	return 0;
}

Mat watershedCluster(Mat &image, int &numComp) {
	// binarize
	Mat gray, binary;
	cvtColor(image, gray, COLOR_BGR2GRAY);
	threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
	// morphological open + distance transform
	Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	morphologyEx(binary, binary, MORPH_OPEN, k, Point(-1, -1));
	Mat dist;
	distanceTransform(binary, dist, DistanceTypes::DIST_L2, 3, CV_32F);
	normalize(dist, dist, 0.0, 1.0, NORM_MINMAX);

	// generate markers from the distance map
	threshold(dist, dist, 0.1, 1.0, THRESH_BINARY);
	normalize(dist, dist, 0, 255, NORM_MINMAX);
	dist.convertTo(dist, CV_8UC1);

	// label each seed region with its own index
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(dist, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
	if (contours.empty()) {
		return Mat();
	}

	Mat markers(dist.size(), CV_32S);
	markers = Scalar::all(0);
	for (int i = 0; i < static_cast<int>(contours.size()); i++) {
		drawContours(markers, contours, i, Scalar(i + 1), -1, 8, hierarchy, INT_MAX);
	}
	circle(markers, Point(5, 5), 3, Scalar(255), -1); // seed a background marker near the corner

	// watershed transform
	watershed(image, markers);
	numComp = static_cast<int>(contours.size());
	return markers;
}

void createDisplaySegments(Mat &markers, int numSegments, Mat &image) {
	// generate random color
	vector<Vec3b> colors;
	for (int i = 0; i < numSegments; i++) {
		int r = theRNG().uniform(0, 255);
		int g = theRNG().uniform(0, 255);
		int b = theRNG().uniform(0, 255);
		colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
	}

	// fill each segment with its color and show the result
	Mat dst = Mat::zeros(markers.size(), CV_8UC3);
	int index = 0;
	for (int row = 0; row < markers.rows; row++) {
		for (int col = 0; col < markers.cols; col++) {
			index = markers.at<int>(row, col);
			if (index > 0 && index <= numSegments) {
				dst.at<Vec3b>(row, col) = colors[index - 1];
			}
			else {
				dst.at<Vec3b>(row, col) = Vec3b(255, 255, 255);
			}
		}
	}
	imshow("分水岭图像分割-演示", dst);
	return;
}
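
After watershed() returns, boundary pixels are labeled -1 in the marker image. A minimal sketch (not part of the demo above, assuming access to the markers returned by watershedCluster and the source image) that paints these ridges onto the input for a quick visual check:

Mat boundary = (markers == -1);              // 8-bit mask of the watershed lines
Mat overlay = image.clone();
overlay.setTo(Scalar(0, 0, 255), boundary);  // draw the ridges in red
imshow("watershed boundaries", overlay);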

GrabCut: principle and demo (an interactive 2D matting algorithm)

The algorithm is patent-encumbered, and its handling of object edges is not ideal.
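
Before the interactive demo, here is a minimal, non-interactive sketch of the cv::grabCut call itself (the file path and the rectangle are placeholders, not taken from the demo below):

Mat img = imread("input.jpg");               // placeholder path
Mat gcMask, bgModel, fgModel;
Rect roi(50, 50, 200, 300);                  // rough box around the object
grabCut(img, gcMask, roi, bgModel, fgModel, 5, GC_INIT_WITH_RECT);
Mat binMask = gcMask & 1;                    // GC_FGD / GC_PR_FGD have the low bit set
Mat fg;
img.copyTo(fg, binMask);                     // cut-out foreground on a black background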

#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>

using namespace cv;
using namespace std;

int numRun = 0;
Rect rect;
bool init = false;
Mat src, image;
Mat mask, bgModel, fgModel;
const char* winTitle = "input image";

void onMouse(int event, int x, int y, int flags, void* param);
void setROIMask();
void showImage();
void runGrabCut();
int main(int argc, char** argv) {
	src = imread("D:/vcprojects/images/flower.png", 1);
	if (src.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	mask.create(src.size(), CV_8UC1);
	mask.setTo(Scalar::all(GC_BGD));

	namedWindow(winTitle, WINDOW_AUTOSIZE);
	setMouseCallback(winTitle, onMouse, 0);
	imshow(winTitle, src);

	while (true) {
		char c = (char)waitKey(0);
		if (c == 'n') {
			runGrabCut();
			numRun++;
			showImage();
			printf("current iteative times : %d\n", numRun);
		}
		if ((int)c == 27) {
			break;
		}
	}

	waitKey(0);
	return 0;
}

void showImage() {
	Mat result, binMask;
	binMask.create(mask.size(), CV_8UC1);
	binMask = mask & 1;
	if (init) {
		src.copyTo(result, binMask);
	} else {
		src.copyTo(result);
	}
	rectangle(result, rect, Scalar(0, 0, 255), 2, 8);
	imshow(winTitle, result);
}

void setROIMask() {
	// GC_BGD = 0     definite background
	// GC_FGD = 1     definite foreground
	// GC_PR_BGD = 2  probable background
	// GC_PR_FGD = 3  probable foreground
	mask.setTo(GC_BGD);
	rect.x = max(0, rect.x);                             // clamp the rectangle to the image bounds
	rect.y = max(0, rect.y);
	rect.width = min(rect.width, src.cols - rect.x);
	rect.height = min(rect.height, src.rows - rect.y);
	mask(rect).setTo(Scalar(GC_PR_FGD));
}

void onMouse(int event, int x, int y, int flags, void* param) {
	switch (event)
	{
	case EVENT_LBUTTONDOWN:  // left button pressed: start a new rectangle
		rect.x = x;
		rect.y = y;
		rect.width = 1;
		rect.height = 1;
		init = false;
		numRun = 0;
		break;
	case EVENT_MOUSEMOVE:  // dragging: update the rectangle
		if (flags & EVENT_FLAG_LBUTTON) {
			rect = Rect(Point(rect.x, rect.y), Point(x, y));
			showImage();
		}
		break;
	case EVENT_LBUTTONUP:  // left button released: finalize the rectangle
		if (rect.width > 1 && rect.height > 1) {
			setROIMask();
			showImage();
		}
		break;
	default:
		break;
	}
}

void runGrabCut() {
	if (rect.width < 2 || rect.height < 2) {
		return;
	}
	
	if (init) {
		grabCut(src, mask, rect, bgModel, fgModel, 1);
	} else {
		grabCut(src, mask, rect, bgModel, fgModel, 1, GC_INIT_WITH_RECT);
		init = true;
	}
}
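
Once the iterations look good, the cut-out can also be exported with a transparent background instead of the red-rectangle preview. A minimal sketch (an addition, not part of the original demo; the output path is a placeholder):

Mat binMask = (mask & 1) * 255;          // foreground mask as 0/255
Mat bgra;
cvtColor(src, bgra, COLOR_BGR2BGRA);
vector<Mat> ch;
split(bgra, ch);
ch[3] = binMask;                         // alpha channel = foreground mask
merge(ch, bgra);
imwrite("cutout.png", bgra);             // PNG keeps the alpha channel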

ID-photo background replacement example


#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

Mat mat_to_samples(Mat &image);
int main(int argc, char** argv) {
	Mat src = imread("D:/vcprojects/images/toux.jpg");
	if (src.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	namedWindow("input image", CV_WINDOW_AUTOSIZE);
	imshow("input image", src);

	// assemble every pixel into a row of the sample matrix
	Mat points = mat_to_samples(src);

	// run K-means
	int numCluster = 4;
	Mat labels;
	Mat centers;
	TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1); // termination criteria
	kmeans(points, numCluster, labels, criteria, 3, KMEANS_PP_CENTERS, centers);

	// background removal + mask generation
	Mat mask = Mat::zeros(src.size(), CV_8UC1);
	int index = src.cols * 2 + 2;              // sample index of pixel (row 2, col 2), assumed to lie in the background
	int cindex = labels.at<int>(index, 0);     // cluster label of that background pixel
	int height = src.rows;
	int width = src.cols;
	//Mat dst;
	//src.copyTo(dst);
	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			index = row*width + col;
			int label = labels.at<int>(index, 0);
			if (label == cindex) { // background cluster -> 0
				//dst.at<Vec3b>(row, col)[0] = 0;
				//dst.at<Vec3b>(row, col)[1] = 0;
				//dst.at<Vec3b>(row, col)[2] = 0;
				mask.at<uchar>(row, col) = 0; 
			} else { // everything else -> 255 (foreground)
				mask.at<uchar>(row, col) = 255;
			}
		}
	}
	//imshow("mask", mask);

	// erode + Gaussian blur to feather the mask edge
	Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	erode(mask, mask, k);
	//imshow("erode-mask", mask);
	GaussianBlur(mask, mask, Size(3, 3), 0, 0);
	//imshow("Blur Mask", mask);

	// blend the original foreground with the new background color along the mask edge
	RNG rng(12345);
	Vec3b color;
	color[0] = 217;//rng.uniform(0, 255);
	color[1] = 60;// rng.uniform(0, 255);
	color[2] = 160;// rng.uniform(0, 255);
	Mat result(src.size(), src.type());

	double w = 0.0;
	int b = 0, g = 0, r = 0;
	int b1 = 0, g1 = 0, r1 = 0;
	int b2 = 0, g2 = 0, r2 = 0;

	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			int m = mask.at<uchar>(row, col);
			if (m == 255) {
				result.at<Vec3b>(row, col) = src.at<Vec3b>(row, col); // foreground
			}
			else if (m == 0) {
				result.at<Vec3b>(row, col) = color; // background
			} 
			else {
				w = m / 255.0;
				b1 = src.at<Vec3b>(row, col)[0];
				g1 = src.at<Vec3b>(row, col)[1];
				r1 = src.at<Vec3b>(row, col)[2];

				b2 = color[0];
				g2 = color[1];
				r2 = color[2];

				b = b1*w + b2*(1.0 - w);
				g = g1*w + g2*(1.0 - w);
				r = r1*w + r2*(1.0 - w);

				result.at<Vec3b>(row, col)[0] = b;
				result.at<Vec3b>(row, col)[1] = g;
				result.at<Vec3b>(row, col)[2] = r;
			}
		}
	}
	imshow("背景替换", result);

	waitKey(0);
	return 0;
}

Mat mat_to_samples(Mat &image) {
	int w = image.cols;
	int h = image.rows;
	int samplecount = w*h;
	int dims = image.channels();
	Mat points(samplecount, dims, CV_32F, Scalar(10));

	int index = 0;
	for (int row = 0; row < h; row++) {
		for (int col = 0; col < w; col++) {
			index = row*w + col;
			Vec3b bgr = image.at<Vec3b>(row, col);
			points.at<float>(index, 0) = static_cast<int>(bgr[0]);
			points.at<float>(index, 1) = static_cast<int>(bgr[1]);
			points.at<float>(index, 2) = static_cast<int>(bgr[2]);
		}
	}
	return points;
}
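
mat_to_samples() above can be written more compactly with reshape(), which builds the same samplecount x 3 float matrix (a minimal equivalent sketch):

Mat points;
image.reshape(1, static_cast<int>(image.total())).convertTo(points, CV_32F);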

Green-screen video background matting


#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

Mat replace_and_blend(Mat &frame, Mat &mask);
Mat background_01;
Mat background_02;
int main(int argc, char** argv) {
	// start here...	
	background_01 = imread("D:/vcprojects/images/bg_01.jpg");
	background_02 = imread("D:/vcprojects/images/bg_02.jpg");
	VideoCapture capture;
	capture.open("D:/vcprojects/images/01.mp4");
	if (!capture.isOpened()) {
		printf("could not find the video file...\n");
		return -1;
	}
	const char* title = "input video";
	const char* resultWin = "result video";
	namedWindow(title, WINDOW_AUTOSIZE);
	namedWindow(resultWin, WINDOW_AUTOSIZE);
	Mat frame, hsv, mask;
	int count = 0;
	while (capture.read(frame)) {
		cvtColor(frame, hsv, COLOR_BGR2HSV);
		inRange(hsv, Scalar(35, 43, 46), Scalar(155, 255, 255), mask); // threshold the green screen in HSV
		// morphological cleanup of the mask
		Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
		morphologyEx(mask, mask, MORPH_CLOSE, k);
		erode(mask, mask, k);
		GaussianBlur(mask, mask, Size(3, 3), 0, 0);

		Mat result = replace_and_blend(frame, mask);
		char c = waitKey(1);
		if (c == 27) {
			break;
		}
		imshow(resultWin, result);
		imshow(title, frame);
	}

	waitKey(0);
	return 0;
}

Mat replace_and_blend(Mat &frame, Mat &mask) {
	Mat result = Mat::zeros(frame.size(), frame.type());
	int h = frame.rows;
	int w = frame.cols;
	int dims = frame.channels();

	// replace and blend
	int m = 0;
	double wt = 0;

	int r = 0, g = 0, b = 0;
	int r1 = 0, g1 = 0, b1 = 0;
	int r2 = 0, g2 = 0, b2 = 0;

	for (int row = 0; row < h; row++) {
		uchar* current = frame.ptr<uchar>(row);        // current frame row
		uchar* bgrow = background_02.ptr<uchar>(row);  // replacement background row
		uchar* maskrow = mask.ptr<uchar>(row);         // mask row
		uchar* targetrow = result.ptr<uchar>(row);     // output row
		for (int col = 0; col < w; col++) {
			m = *maskrow++;
			if (m == 255) { // background (green screen): take the replacement image
				*targetrow++ = *bgrow++;
				*targetrow++ = *bgrow++;
				*targetrow++ = *bgrow++;
				current += 3;

			} else if (m == 0) { // foreground: keep the original frame
				*targetrow++ = *current++;
				*targetrow++ = *current++;
				*targetrow++ = *current++;
				bgrow += 3;
			} else {
				b1 = *bgrow++;
				g1 = *bgrow++;
				r1 = *bgrow++;

				b2 = *current++;
				g2 = *current++;
				r2 = *current++;

				// blend weight
				wt = m / 255.0;
				
				// blend the two sources
				b = b1*wt + b2*(1.0 - wt);
				g = g1*wt + g2*(1.0 - wt);
				r = r1*wt + r2*(1.0 - wt);

				*targetrow++ = b;
				*targetrow++ = g;
				*targetrow++ = r;
			}
		}
	}

	return result;
}
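
The per-pixel pointer loop above can also be expressed with whole-Mat operations. A minimal sketch of the same mask-weighted blend (assuming frame, mask, and background_02 share the same size):

Mat wf, w3, f32, b32;
mask.convertTo(wf, CV_32F, 1.0 / 255.0);     // per-pixel weight in [0, 1]
cvtColor(wf, w3, COLOR_GRAY2BGR);            // replicate the weight across 3 channels
frame.convertTo(f32, CV_32FC3);
background_02.convertTo(b32, CV_32FC3);
Mat inv = Mat::ones(w3.size(), w3.type()) - w3;
Mat blended = b32.mul(w3) + f32.mul(inv);    // mask==255 -> background, mask==0 -> frame
Mat result;
blended.convertTo(result, CV_8UC3);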