基于距离的分水岭分割
从背景中分离硬币和药片并计数
#include "pch.h"
#include <iostream>
#include<opencv2/opencv.hpp>
#include"opencv2/highgui/highgui.hpp"
#include"opencv2/imgproc/imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
Mat src = imread("D:/opencv/图像分割/代码与图片/coins_001.jpg");
if (src.empty()) {
printf("could not load image...\n");
return -1;
}
namedWindow("input image", CV_WINDOW_NORMAL);
imshow("input image", src);
Mat gray, binary, shifted;
pyrMeanShiftFiltering(src, shifted, 21, 51);//边缘保留
//imshow("shifted", shifted);
cvtColor(shifted, gray, COLOR_BGR2GRAY);
threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
//imshow("binary", binary);
// distance transform 距离变换
Mat dist;
distanceTransform(binary, dist, DistanceTypes::DIST_L2, 3, CV_32F);
//归一化
normalize(dist, dist, 0, 1, NORM_MINMAX);
//imshow("distance result", dist);
// binary 选择0.4--1的范围进行二值化
threshold(dist, dist, 0.4, 1, THRESH_BINARY);
//imshow("distance binary", dist);
// markers convertTo矩阵数据类型转换
Mat dist_m;
dist.convertTo(dist_m, CV_8U);
vector<vector<Point>> contours;
//寻找最外界轮廓
findContours(dist_m, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
// create markers
Mat markers = Mat::zeros(src.size(), CV_32SC1);
for (size_t t = 0; t < contours.size(); t++) {
drawContours(markers, contours, static_cast<int>(t), Scalar::all(static_cast<int>(t) + 1), -1);
}
circle(markers, Point(5, 5), 3, Scalar(255), -1);
//imshow("markers", markers*10000);
// 形态学操作(腐蚀) - 彩色图像,目的是去掉干扰,让结果更好
Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
morphologyEx(src, src, MORPH_ERODE, k);
// 完成分水岭变换
watershed(src, markers);
Mat mark = Mat::zeros(markers.size(), CV_8UC1);
markers.convertTo(mark, CV_8UC1);
bitwise_not(mark, mark, Mat());
//imshow("watershed result", mark);
// generate random color 形成随机颜色
vector<Vec3b> colors;
for (size_t i = 0; i < contours.size(); i++) {
int r = theRNG().uniform(0, 255);
int g = theRNG().uniform(0, 255);
int b = theRNG().uniform(0, 255);
colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
}
// 颜色填充与最终显示
Mat dst = Mat::zeros(markers.size(), CV_8UC3);
int index = 0;
for (int row = 0; row < markers.rows; row++) {
for (int col = 0; col < markers.cols; col++) {
index = markers.at<int>(row, col);
if (index > 0 && index <= contours.size()) {
dst.at<Vec3b>(row, col) = colors[index - 1];
}
else {
dst.at<Vec3b>(row, col) = Vec3b(0, 0, 0);
}
}
}
namedWindow("output image", CV_WINDOW_NORMAL);
imshow("output image", dst);
printf("number of objects : %d\n", contours.size());
waitKey(0);
return 0;
}
分离证件照中的头像
#include "pch.h"
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
Mat watershedCluster(Mat &image, int &numSegments);
void createDisplaySegments(Mat &segments, int numSegments, Mat &image);
int main(int argc, char** argv) {
Mat src = imread("D:/opencv/图像分割/代码与图片/cvtest.png");
if (src.empty()) {
printf("could not load image...\n");
return -1;
}
namedWindow("input image", CV_WINDOW_AUTOSIZE);
imshow("input image", src);
int numSegments;
Mat markers = watershedCluster(src, numSegments);
createDisplaySegments(markers, numSegments, src);
waitKey(0);
return 0;
}
// Builds watershed markers for `image` and runs cv::watershed on them.
// On return `numComp` holds the number of seed regions found (0 when none
// were found, in which case an empty Mat is returned).
Mat watershedCluster(Mat &image, int &numComp) {
    numComp = 0; // fix: out-param was left uninitialized on the early-return path below

    // Otsu binarization of the grayscale image.
    Mat gray, binary;
    cvtColor(image, gray, COLOR_BGR2GRAY);
    threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);

    // Morphological opening removes speckle noise before the distance transform.
    Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
    morphologyEx(binary, binary, MORPH_OPEN, k, Point(-1, -1));
    Mat dist;
    distanceTransform(binary, dist, DistanceTypes::DIST_L2, 3, CV_32F);
    normalize(dist, dist, 0.0, 1.0, NORM_MINMAX);

    // Keep the peaks, rescale to 8-bit so findContours can consume it.
    threshold(dist, dist, 0.1, 1.0, THRESH_BINARY);
    normalize(dist, dist, 0, 255, NORM_MINMAX);
    dist.convertTo(dist, CV_8UC1);

    // Each contour becomes one marker label (1..N).
    vector<vector<Point>> contours;
    vector<Vec4i> hireachy;
    findContours(dist, contours, hireachy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
    if (contours.empty()) {
        return Mat();
    }
    Mat markers(dist.size(), CV_32S);
    markers = Scalar::all(0);
    // fix: size_t index avoids the signed/unsigned comparison warning
    for (size_t i = 0; i < contours.size(); i++) {
        drawContours(markers, contours, static_cast<int>(i),
                     Scalar(static_cast<int>(i) + 1), -1, 8, hireachy, INT_MAX);
    }
    // Seed the background with a dot near the top-left corner.
    circle(markers, Point(5, 5), 3, Scalar(255), -1);

    watershed(image, markers);
    numComp = static_cast<int>(contours.size());
    return markers;
}
// Colors each watershed segment with a random color and displays the result.
// `markers` is the CV_32S label image produced by watershedCluster();
// pixels labeled 1..numSegments get a segment color, all others white.
void createDisplaySegments(Mat &markers, int numSegments, Mat &image) {
    // fix: watershedCluster() may return an empty Mat / zero segments
    if (markers.empty() || numSegments <= 0) {
        return;
    }
    // One random color per segment.
    vector<Vec3b> colors;
    for (int i = 0; i < numSegments; i++) { // fix: int loop index, numSegments is int (was size_t vs int)
        int r = theRNG().uniform(0, 255);
        int g = theRNG().uniform(0, 255);
        int b = theRNG().uniform(0, 255);
        colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
    }
    // Fill the output: labeled pixels get their segment color, the rest white.
    Mat dst = Mat::zeros(markers.size(), CV_8UC3);
    for (int row = 0; row < markers.rows; row++) {
        for (int col = 0; col < markers.cols; col++) {
            int index = markers.at<int>(row, col);
            if (index > 0 && index <= numSegments) {
                dst.at<Vec3b>(row, col) = colors[index - 1];
            } else {
                dst.at<Vec3b>(row, col) = Vec3b(255, 255, 255);
            }
        }
    }
    imshow("分水岭图像分割-演示", dst);
}
Grabcut原理与演示(二维抠图算法,可交互)
受专利影响,边缘处理不够好
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
// ---- Interactive GrabCut demo: shared state and forward declarations ----
int numRun = 0;              // number of GrabCut iterations run so far
Rect rect;                   // user-drawn selection rectangle
bool init = false;           // true once GrabCut has been initialized with the rect
Mat src, image;              // src: loaded input image (image appears unused in this chunk)
Mat mask, bgModel, fgModel;  // GrabCut label mask and its internal background/foreground GMM models
const char* winTitle = "input image";  // shared window title
void onMouse(int event, int x, int y, int flags, void* param);  // mouse-driven rectangle selection
void setROIMask();  // seeds `mask` from the selection rectangle
void showImage();   // draws the current selection / segmentation result
void runGrabCut();  // runs one GrabCut iteration
// Interactive GrabCut demo. Drag a rectangle with the mouse, then press
// 'n' to run one more GrabCut iteration; ESC quits.
int main(int argc, char** argv) {
    src = imread("D:/vcprojects/images/flower.png", 1);
    if (src.empty()) {
        printf("could not load image...\n");
        return -1;
    }
    // Everything starts out labeled as definite background.
    mask.create(src.size(), CV_8UC1);
    mask.setTo(Scalar::all(GC_BGD));

    namedWindow(winTitle, CV_WINDOW_AUTOSIZE);
    setMouseCallback(winTitle, onMouse, 0);
    imshow(winTitle, src);

    for (;;) {
        const char key = (char)waitKey(0);
        if ((int)key == 27) { // ESC quits
            break;
        }
        if (key == 'n') {     // one more GrabCut refinement pass
            runGrabCut();
            numRun++;
            showImage();
            printf("current iteative times : %d\n", numRun);
        }
    }
    waitKey(0);
    return 0;
}
void showImage() {
Mat result, binMask;
binMask.create(mask.size(), CV_8UC1);
binMask = mask & 1;
if (init) {
src.copyTo(result, binMask);
} else {
src.copyTo(result);
}
rectangle(result, rect, Scalar(0, 0, 255), 2, 8);
imshow(winTitle, result);
}
// Seeds the GrabCut mask from the user rectangle.
// Label values: GC_BGD = 0 (background), GC_FGD = 1 (foreground),
// GC_PR_BGD = 2 (probable background), GC_PR_FGD = 3 (probable foreground).
void setROIMask() {
    mask.setTo(GC_BGD);
    // Clamp the user-drawn rectangle so mask(rect) stays inside the image.
    // Note: only the stored origin is clamped; width/height are trimmed
    // against the far edges (matching the original drag semantics).
    rect.x = max(0, rect.x);
    rect.y = max(0, rect.y);
    rect.width = min(rect.width, src.cols - rect.x);
    rect.height = min(rect.height, src.rows - rect.y);
    // Everything inside the rectangle is a "probable foreground" seed.
    mask(rect).setTo(Scalar(GC_PR_FGD));
}
// Mouse callback: left-drag defines the selection rectangle.
// Press starts a fresh selection, move grows it live, release commits it
// into the GrabCut mask (1x1 degenerate rectangles are ignored).
void onMouse(int event, int x, int y, int flags, void* param) {
    if (event == EVENT_LBUTTONDOWN) {
        // Start a new selection and reset the iteration state.
        rect = Rect(x, y, 1, 1);
        init = false;
        numRun = 0;
    } else if (event == EVENT_MOUSEMOVE) {
        // While the left button is held, grow the rectangle and echo it.
        if (flags & EVENT_FLAG_LBUTTON) {
            rect = Rect(Point(rect.x, rect.y), Point(x, y));
            showImage();
        }
    } else if (event == EVENT_LBUTTONUP) {
        // Commit only a real (larger than 1x1) rectangle.
        if (rect.width > 1 && rect.height > 1) {
            setROIMask();
            showImage();
        }
    }
}
// Runs one GrabCut pass: the first call initializes from the rectangle
// (GC_INIT_WITH_RECT), subsequent calls refine the existing mask/models.
void runGrabCut() {
    if (rect.width < 2 || rect.height < 2) {
        return; // no usable selection yet
    }
    if (init) {
        // Refine using the mask and GMM models from previous iterations.
        grabCut(src, mask, rect, bgModel, fgModel, 1);
    } else { // BUG FIX: `else` was missing, so re-initialization ran on EVERY call,
             // discarding the refinement done by the branch above.
        grabCut(src, mask, rect, bgModel, fgModel, 1, GC_INIT_WITH_RECT);
        init = true;
    }
}
证件照背景替换案例
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
Mat mat_to_samples(Mat &image);
// ID-photo background replacement: clusters pixels with KMeans, treats the
// cluster of a pixel near the image corner as "background", builds a soft
// (eroded + blurred) mask, and blends a solid color wherever the mask says
// background.
int main(int argc, char** argv) {
Mat src = imread("D:/vcprojects/images/toux.jpg");
if (src.empty()) {
printf("could not load image...\n");
return -1;
}
namedWindow("input image", CV_WINDOW_AUTOSIZE);
imshow("input image", src);
// Assemble the data: one row per pixel, one float column per channel.
Mat points = mat_to_samples(src);
// Run KMeans (3 attempts, kmeans++ seeding).
int numCluster = 4;
Mat labels;
Mat centers;
TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1); // stop criterion
kmeans(points, numCluster, labels, criteria, 3, KMEANS_PP_CENTERS, centers);
// Background removal + mask generation.
Mat mask=Mat::zeros(src.size(), CV_8UC1);
// NOTE(review): samples are laid out row*width+col, so src.rows*2+2 is NOT
// "row 2, col 2" unless rows == cols; presumably the intent is just to pick
// a pixel near the top-left corner as the background sample — verify.
int index = src.rows*2 + 2;
int cindex = labels.at<int>(index, 0);
int height = src.rows;
int width = src.cols;
//Mat dst;
//src.copyTo(dst);
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
index = row*width + col;
int label = labels.at<int>(index, 0);
if (label == cindex) { // background pixels -> 0
//dst.at<Vec3b>(row, col)[0] = 0;
//dst.at<Vec3b>(row, col)[1] = 0;
//dst.at<Vec3b>(row, col)[2] = 0;
mask.at<uchar>(row, col) = 0;
} else {// foreground pixels -> 255
mask.at<uchar>(row, col) = 255;
}
}
}
//imshow("mask", mask);
// Erode then Gaussian-blur the mask: the blur creates intermediate values
// (1..254) that drive the soft blend at the foreground/background border.
Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
erode(mask, mask, k);
//imshow("erode-mask", mask);
GaussianBlur(mask, mask, Size(3, 3), 0, 0);
//imshow("Blur Mask", mask);
// Channel mixing: replacement background color (BGR).
RNG rng(12345);
Vec3b color;
color[0] = 217;//rng.uniform(0, 255);
color[1] = 60;// rng.uniform(0, 255);
color[2] = 160;// rng.uniform(0, 255);
Mat result(src.size(), src.type());
double w = 0.0;
int b = 0, g = 0, r = 0;
int b1 = 0, g1 = 0, r1 = 0;
int b2 = 0, g2 = 0, r2 = 0;
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
int m = mask.at<uchar>(row, col);
if (m == 255) {
result.at<Vec3b>(row, col) = src.at<Vec3b>(row, col); // foreground: keep original
}
else if (m == 0) {
result.at<Vec3b>(row, col) = color; // background: replacement color
}
else {
// Border zone: alpha-blend source and replacement, weight = mask/255.
w = m / 255.0;
b1 = src.at<Vec3b>(row, col)[0];
g1 = src.at<Vec3b>(row, col)[1];
r1 = src.at<Vec3b>(row, col)[2];
b2 = color[0];
g2 = color[1];
r2 = color[2];
b = b1*w + b2*(1.0 - w);
g = g1*w + g2*(1.0 - w);
r = r1*w + r2*(1.0 - w);
result.at<Vec3b>(row, col)[0] = b;
result.at<Vec3b>(row, col)[1] = g;
result.at<Vec3b>(row, col)[2] = r;
}
}
}
imshow("背景替换", result);
waitKey(0);
return 0;
}
// Flattens the H x W BGR image into an (H*W) x channels CV_32F matrix —
// one pixel per row, one float column per channel — the sample layout
// cv::kmeans expects.
Mat mat_to_samples(Mat &image) {
    const int width = image.cols;
    const int height = image.rows;
    const int sampleCount = width * height;
    const int channels = image.channels();
    Mat samples(sampleCount, channels, CV_32F, Scalar(10));
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            const int idx = y * width + x;
            const Vec3b pixel = image.at<Vec3b>(y, x);
            samples.at<float>(idx, 0) = static_cast<float>(pixel[0]);
            samples.at<float>(idx, 1) = static_cast<float>(pixel[1]);
            samples.at<float>(idx, 2) = static_cast<float>(pixel[2]);
        }
    }
    return samples;
}
绿幕背景视频抠图
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
Mat replace_and_blend(Mat &frame, Mat &mask);
Mat background_01;
Mat background_02;
// Green-screen keying demo: reads a video, masks the green background in
// HSV space, and composites each frame over background_02 via
// replace_and_blend(). ESC stops playback.
int main(int argc, char** argv) {
    background_01 = imread("D:/vcprojects/images/bg_01.jpg");
    background_02 = imread("D:/vcprojects/images/bg_02.jpg");
    // fix: replace_and_blend() dereferences background_02 row-by-row with no
    // check, so a missing file would crash — fail fast here instead.
    if (background_02.empty()) {
        printf("could not load background image...\n");
        return -1;
    }
    VideoCapture capture;
    capture.open("D:/vcprojects/images/01.mp4");
    if (!capture.isOpened()) {
        printf("could not find the video file...\n");
        return -1;
    }
    // fix: binding a string literal to non-const char* is ill-formed in C++11+
    const char* title = "input video";
    const char* resultWin = "result video";
    namedWindow(title, CV_WINDOW_AUTOSIZE);
    namedWindow(resultWin, CV_WINDOW_AUTOSIZE);

    Mat frame, hsv, mask;
    while (capture.read(frame)) {
        // Green-screen range in HSV; in-range pixels become mask==255 (background).
        cvtColor(frame, hsv, COLOR_BGR2HSV);
        inRange(hsv, Scalar(35, 43, 46), Scalar(155, 255, 255), mask);
        // Close small holes, shrink the matte, then blur so edges blend softly.
        Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
        morphologyEx(mask, mask, MORPH_CLOSE, k);
        erode(mask, mask, k);
        GaussianBlur(mask, mask, Size(3, 3), 0, 0);

        Mat result = replace_and_blend(frame, mask);
        // Show the current frame before polling the keyboard so the last
        // processed frame is always displayed.
        imshow(title, frame);
        imshow(resultWin, result);
        char c = (char)waitKey(1);
        if (c == 27) { // ESC
            break;
        }
    }
    waitKey(0);
    return 0;
}
// Composites `frame` over the global background_02 using `mask` as an alpha
// matte: mask==255 -> background pixel, mask==0 -> frame (foreground) pixel,
// anything in between (produced by the Gaussian blur upstream) -> weighted
// blend. Returns the composited BGR frame.
// NOTE(review): assumes background_02 has the same size and 3-channel type
// as `frame` — the per-row pointer walk below is unchecked; confirm the
// background image matches the video resolution.
Mat replace_and_blend(Mat &frame, Mat &mask) {
Mat result = Mat::zeros(frame.size(), frame.type());
int h = frame.rows;
int w = frame.cols;
int dims = frame.channels();
// replace and blend
int m = 0;
double wt = 0;
int r = 0, g = 0, b = 0;
int r1 = 0, g1 = 0, b1 = 0;
int r2 = 0, g2 = 0, b2 = 0;
for (int row = 0; row < h; row++) {
uchar* current = frame.ptr<uchar>(row); // current frame row (BGR triplets)
uchar* bgrow = background_02.ptr<uchar>(row); // replacement background row
uchar* maskrow = mask.ptr<uchar>(row); // alpha matte row (one byte per pixel)
uchar* targetrow = result.ptr<uchar>(row); // output row
for (int col = 0; col < w; col++) {
m = *maskrow++;
if (m == 255) { // pure background: copy the replacement pixel
*targetrow++ = *bgrow++;
*targetrow++ = *bgrow++;
*targetrow++ = *bgrow++;
current += 3; // keep the skipped source pointer in step
} else if(m==0) {// pure foreground: copy the frame pixel
*targetrow++ = *current++;
*targetrow++ = *current++;
*targetrow++ = *current++;
bgrow += 3; // keep the skipped background pointer in step
} else {
// Border zone: read both pixels (advancing both pointers)...
b1 = *bgrow++;
g1 = *bgrow++;
r1 = *bgrow++;
b2 = *current++;
g2 = *current++;
r2 = *current++;
// ...weight by the matte value...
wt = m / 255.0;
// ...and write the blend (wt -> background, 1-wt -> frame).
b = b1*wt + b2*(1.0 - wt);
g = g1*wt + g2*(1.0 - wt);
r = r1*wt + r2*(1.0 - wt);
*targetrow++ = b;
*targetrow++ = g;
*targetrow++ = r;
}
}
}
return result;
}