Computer Vision Basics, Task 05

Maximum Between-Class Variance Method (Otsu's Thresholding)

Otsu's method (OTSU) is an algorithm for selecting the threshold used to binarize an image, proposed by the Japanese scholar Otsu in 1979. It is also called the maximum between-class variance method, because the threshold it chooses maximizes the between-class variance of the foreground and background after the image is binarized.

It is widely regarded as one of the best threshold-selection algorithms for image segmentation: it is simple to compute and relatively insensitive to image brightness and contrast, so it is used extensively in digital image processing. The method splits the image into background and foreground according to its gray-level statistics. Since variance measures how spread out the gray levels are, a larger between-class variance means the two parts of the image differ more; misclassifying foreground pixels as background (or vice versa) shrinks that difference. Maximizing the between-class variance therefore minimizes the probability of misclassification.

OTSU assumes there is a threshold TH that divides all pixels of the image into two classes, C1 (gray levels less than TH) and C2 (gray levels greater than TH). Let m1 and m2 denote the mean gray levels of C1 and C2, mG the global mean of the image, and p1 and p2 the probabilities that a pixel belongs to C1 and C2. These quantities satisfy

    p1 + p2 = 1,    p1·m1 + p2·m2 = mG

and the between-class variance is defined as

    σ_B² = p1·(m1 − mG)² + p2·(m2 − mG)²        (4)

Equation (4) can be further rewritten as

    σ_B² = p1·p2·(m1 − m2)² = (mG·p1 − m)² / (p1·(1 − p1))

where m = p1·m1 is the cumulative mean up to the threshold. OTSU evaluates this quantity for every candidate threshold and keeps the one that maximizes it; the last form is exactly what the from-scratch implementation below computes.
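As a small numerical illustration (a sketch with a made-up 4-level histogram, not taken from the article), the following program evaluates σ_B²(k) = (mG·p1(k) − m(k))² / (p1(k)·(1 − p1(k))) for every candidate threshold k and prints the maximizer; the 256-level implementation further below follows the same recipe.

#include <iostream>
#include <vector>

int main() {
    // Made-up 4-level histogram (gray levels 0..3), for illustration only.
    std::vector<double> hist = {10, 40, 5, 45};
    double total = 0, mG = 0;
    for (size_t i = 0; i < hist.size(); ++i) { total += hist[i]; mG += i * hist[i]; }
    mG /= total;                                   // global mean

    double P1 = 0, m = 0, bestVar = -1;
    int bestK = 0;
    for (size_t k = 0; k < hist.size(); ++k) {
        P1 += hist[k] / total;                     // cumulative probability p1(k)
        m  += k * hist[k] / total;                 // cumulative mean m(k)
        if (P1 <= 0 || P1 >= 1) continue;          // both classes must be non-empty
        double varB = (mG * P1 - m) * (mG * P1 - m) / (P1 * (1 - P1));
        if (varB > bestVar) { bestVar = varB; bestK = static_cast<int>(k); }
    }
    std::cout << "best threshold k = " << bestK
              << ", between-class variance = " << bestVar << std::endl;
    return 0;
}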

Implementation based on OpenCV:

1. Maximum between-class variance method

double cv::threshold(InputArray src,
                     OutputArray dst,
                     double thresh,
                     double maxval,
                     int type)

Parameters:

src — input array (single-channel, 8-bit or 32-bit floating point).
dst — output array of the same size and type as src.
thresh — threshold value.
maxval — maximum value to use with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.
type — thresholding type; see cv::ThresholdTypes.
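A minimal usage sketch (with a synthetic two-tone image rather than a file, purely for illustration): when THRESH_OTSU is OR-ed into type, the thresh argument is ignored and cv::threshold returns the threshold it selected.

#include <iostream>
#include <opencv2/opencv.hpp>

int main() {
    // Synthetic gray image: dark background with a brighter square (hypothetical test data).
    cv::Mat gray(100, 100, CV_8UC1, cv::Scalar(40));
    cv::rectangle(gray, cv::Rect(30, 30, 40, 40), cv::Scalar(200), cv::FILLED);

    cv::Mat bin;
    // With THRESH_OTSU the `thresh` argument (0 here) is ignored;
    // the selected threshold is returned instead.
    double t = cv::threshold(gray, bin, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
    std::cout << "Otsu threshold = " << t << std::endl;
    return 0;
}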

2. Adaptive thresholding

void adaptiveThreshold(InputArray src, OutputArray dst, 
                             double maxValue,
                             int adaptiveMethod,
                             int thresholdType, 
                             int blockSize, double C)

Parameters:

src — Source 8-bit single-channel image.
dst — Destination image of the same size and the same type as src.
maxValue — Non-zero value assigned to the pixels for which the condition is satisfied.
adaptiveMethod — Adaptive thresholding algorithm to use; see cv::AdaptiveThresholdTypes.
thresholdType — Thresholding type; must be either THRESH_BINARY or THRESH_BINARY_INV (see cv::ThresholdTypes).
blockSize — Size of the pixel neighborhood used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
C — Constant subtracted from the mean or weighted mean. Normally it is positive, but it may be zero or negative as well.
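Before the complete programs below, a quick sketch of the Gaussian variant (again on a synthetic image, purely for illustration): each pixel is compared with the Gaussian-weighted mean of its blockSize × blockSize neighborhood minus C, and blockSize must be odd.

#include <opencv2/opencv.hpp>

int main() {
    // Hypothetical test image: a smooth illumination gradient with two dark blobs.
    cv::Mat gray(200, 200, CV_8UC1);
    for (int r = 0; r < gray.rows; ++r)
        gray.row(r).setTo(cv::Scalar(50 + r));    // brightness increases down the image
    cv::circle(gray, cv::Point(60, 60), 15, cv::Scalar(0), cv::FILLED);
    cv::circle(gray, cv::Point(140, 150), 15, cv::Scalar(0), cv::FILLED);

    cv::Mat bin;
    // blockSize = 21 (odd), C = 10: each pixel is compared with the Gaussian-weighted
    // mean of its 21x21 neighborhood minus 10.
    cv::adaptiveThreshold(gray, bin, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C,
                          cv::THRESH_BINARY, 21, 10);
    cv::imwrite("adaptive_gaussian.png", bin);
    return 0;
}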

Complete example: global Otsu thresholding with cv::threshold:

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
 
int main(int argc, char* argv[])
{
	Mat img = imread(argv[1], -1);
	if (img.empty())
	{
		cout <<"Error: Could not load image" <<endl;
		return 0;
	}
 
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);
 
	Mat dst;
	threshold(gray, dst, 0, 255, THRESH_BINARY | THRESH_OTSU);
 
	imshow("src", img);
	imshow("gray", gray);
	imshow("dst", dst);
	waitKey(0);
 
	return 0;
}
And the adaptive-threshold counterpart with cv::adaptiveThreshold (21×21 mean neighborhood, C = 10):

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
 
int main(int argc, char* argv[])
{
	Mat img = imread(argv[1], -1);
	if (img.empty())
	{
		cout <<"Error: Could not load image" <<endl;
		return 0;
	}
 
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);
 
	Mat dst;
	cv::adaptiveThreshold(gray, dst, 255, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY, 21, 10);
 
	imshow("src", img);
	imshow("gray", gray);
	imshow("dst", dst);
	waitKey(0);
 
	return 0;
}

Implementing the algorithms from scratch, following the principle above. First, Otsu's method (compared against cv::threshold):

#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
 
int Otsu(cv::Mat& src, cv::Mat& dst, int thresh){
	const int Grayscale = 256;
	int graynum[Grayscale] = { 0 };
	int r = src.rows;
	int c = src.cols;
	for (int i = 0; i < r; ++i){
		const uchar* ptr = src.ptr<uchar>(i);
		for (int j = 0; j < c; ++j){        // build the gray-level histogram
			graynum[ptr[j]]++;
		}
	}
 
	double P[Grayscale] = { 0 };
	double PK[Grayscale] = { 0 };
	double MK[Grayscale] = { 0 };
	double srcpixnum = r*c, sumtmpPK = 0, sumtmpMK = 0;
	for (int i = 0; i < Grayscale; ++i){
		P[i] = graynum[i] / srcpixnum;   // probability of each gray level
		PK[i] = sumtmpPK + P[i];         // cumulative probability p1(k)
		sumtmpPK = PK[i];
		MK[i] = sumtmpMK + i*P[i];       // cumulative mean m(k)
		sumtmpMK = MK[i];
	}
	
	// find the threshold that maximizes the between-class variance
	double Var = 0;
	for (int k = 0; k < Grayscale; ++k){
		if (PK[k] <= 0 || PK[k] >= 1)    // skip thresholds that leave one class empty (avoids division by zero)
			continue;
		double varB = (MK[Grayscale-1] * PK[k] - MK[k]) * (MK[Grayscale-1] * PK[k] - MK[k]) / (PK[k] * (1 - PK[k]));
		if (varB > Var){
			Var = varB;
			thresh = k;
		}
	}
 
	// apply the computed threshold
	src.copyTo(dst);
	for (int i = 0; i < r; ++i){
	    uchar* ptr = dst.ptr<uchar>(i);
		for (int j = 0; j < c; ++j){
			if (ptr[j]> thresh)
				ptr[j] = 255;
			else
				ptr[j] = 0;
		}
	}
	return thresh;
}
 
 
int main(){
	cv::Mat src = cv::imread("I:\\Learning-and-Practice\\2019Change\\Image process algorithm\\Img\\Fig1039(a)(polymersomes).tif");
	if (src.empty()){
		return -1;
	}
	if (src.channels() > 1)
		cv::cvtColor(src, src, cv::COLOR_BGR2GRAY);   // imread gives BGR, so convert BGR -> gray
 
	cv::Mat dst,dst2;
	int thresh=0;
	double t2 = (double)cv::getTickCount();
	thresh=Otsu(src , dst, thresh); //Otsu
	std::cout << "Mythresh=" << thresh << std::endl;
	t2 = (double)cv::getTickCount() - t2;
	double time2 = (t2 *1000.) / ((double)cv::getTickFrequency());
	std::cout << "my_process=" << time2 << " ms. " << std::endl << std::endl;
	double otsuThresh = 0;
 
	otsuThresh = cv::threshold(src, dst2, otsuThresh, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
	std::cout << "OpenCVthresh=" << otsuThresh << std::endl;
 
	cv::namedWindow("src", CV_WINDOW_NORMAL);
	cv::imshow("src", src);
	cv::namedWindow("dst", CV_WINDOW_NORMAL);
	cv::imshow("dst", dst);
	cv::namedWindow("dst2", CV_WINDOW_NORMAL);
	cv::imshow("dst2", dst2);
	//cv::imwrite("I:\\Learning-and-Practice\\2019Change\\Image process algorithm\\Image Filtering\\MeanFilter\\TXT.jpg",dst);
	cv::waitKey(0);
}
And adaptive thresholding implemented from scratch, compared against cv::adaptiveThreshold:

#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
 
enum adaptiveMethod{ meanFilter, gaussianFilter, medianFilter };   // smoothing used to estimate the local threshold
 
void AdaptiveThreshold(cv::Mat& src, cv::Mat& dst, double Maxval, int Subsize, double c, adaptiveMethod method = meanFilter){
 
	if (src.channels() > 1)
		cv::cvtColor(src, src, cv::COLOR_BGR2GRAY);
 
	cv::Mat smooth;
	switch (method)
	{
	case meanFilter:
		cv::blur(src, smooth, cv::Size(Subsize, Subsize));               // mean filter
		break;
	case gaussianFilter:
		cv::GaussianBlur(src, smooth, cv::Size(Subsize, Subsize), 0, 0); // Gaussian filter (Subsize must be odd)
		break;
	case medianFilter:
		cv::medianBlur(src, smooth, Subsize);                            // median filter (Subsize must be odd)
		break;
	default:
		break;
	}
 
	smooth = smooth - c;   // local threshold for each pixel = neighborhood average minus C
	
	// compare each pixel with its local threshold
	src.copyTo(dst);
	for (int r = 0; r < src.rows; ++r){
		const uchar* srcptr = src.ptr<uchar>(r);
		const uchar* smoothptr = smooth.ptr<uchar>(r);
		uchar* dstptr = dst.ptr<uchar>(r);
		for (int j = 0; j < src.cols; ++j){
			if (srcptr[j] > smoothptr[j]){
				dstptr[j] = static_cast<uchar>(Maxval);
			}
			else
				dstptr[j] = 0;
		}
	}
 
}
 
int main(){
	cv::Mat src = cv::imread("I:\\Learning-and-Practice\\2019Change\\Image process algorithm\\Img\\Fig1049(a)(spot_shaded_text_image).tif");
	if (src.empty()){
		return -1;
	}
	if (src.channels() > 1)
		cv::cvtColor(src, src, cv::COLOR_BGR2GRAY);   // imread gives BGR, so convert BGR -> gray
 
	cv::Mat dst, dst2;
	double t2 = (double)cv::getTickCount();
	AdaptiveThreshold(src, dst, 255, 21, 10, meanFilter);   // from-scratch adaptive threshold (mean filter)
	t2 = (double)cv::getTickCount() - t2;
	double time2 = (t2 *1000.) / ((double)cv::getTickFrequency());
	std::cout << "my_process=" << time2 << " ms. " << std::endl << std::endl;
 
 
	cv::adaptiveThreshold(src, dst2, 255, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY, 21, 10);
 
 
	cv::namedWindow("src", CV_WINDOW_NORMAL);
	cv::imshow("src", src);
	cv::namedWindow("dst", CV_WINDOW_NORMAL);
	cv::imshow("dst", dst);
	cv::namedWindow("dst2", CV_WINDOW_NORMAL);
	cv::imshow("dst2", dst2);
	//cv::imwrite("I:\\Learning-and-Practice\\2019Change\\Image process algorithm\\Image Filtering\\MeanFilter\\TXT.jpg",dst);
	cv::waitKey(0);
}

Result images: (screenshots omitted)
