opencv 低通滤波总结

#include "StdAfx.h"
#include "cxcore.h"
#include "cv.h"
#include "highgui.h"


double D0=20;

// Apply a Gaussian low-pass filter to a DFT spectrum, in place.
// src : two-channel complex spectrum (CV_64FC2), as produced by cvDFT; modified in place.
// D0  : cutoff frequency (standard deviation of the Gaussian transfer function).
void ILPF(CvMat* src, const double D0)
{
	const long w = src->width;
	const long h = src->height;
	const long halfW = w / 2;
	const long halfH = h / 2;

	// Transfer function H(u,v), stored as a complex matrix whose
	// imaginary part is zero everywhere.
	CvMat* filter = cvCreateMat(src->height, src->width, CV_64FC2);

	for (int row = 0; row < h; row++)
	{
		double* line = (double*)(filter->data.ptr + filter->step * row);
		for (int col = 0; col < w; col++)
		{
			// The spectrum is NOT shifted, so low frequencies sit at the four
			// corners: measure the distance to the nearest corner by folding
			// each coordinate past the half-way point back toward the edge.
			const double du = (row > halfH) ? (double)(h - row) : (double)row;
			const double dv = (col > halfW) ? (double)(w - col) : (double)col;
			const double dist = sqrt(du * du + dv * dv);

			// 2-D Gaussian low-pass transfer function.
			const double gain = exp(-0.5 * pow(dist / D0, 2));
			line[col * 2]     = gain;  // real part
			line[col * 2 + 1] = 0.0;   // imaginary part

			// Alternative kernels kept from the original for reference:
			//   exponential, decay order 2:  gain = exp(-pow(dist / D0, 2));
			//   Butterworth, order 2:        gain = 1 / (1 + pow(dist / D0, 2 * 2));
			//   ideal low-pass:              gain = (dist <= D0) ? 1.0 : 0.0;
		}
	}

	// Per-element complex multiplication of the spectrum by H(u,v).
	cvMulSpectrums(src, filter, src, CV_DXT_ROWS);
	cvReleaseMat(&filter);
}


int main(int argc, char ** argv)
{
    const char* filename = "E:\\work\\hh\\lena.jpg";
    IplImage * im;
 
    IplImage * realInput;
    IplImage * imaginaryInput;
    IplImage * complexInput;
    int dft_M, dft_N;
    CvMat* dft_A, tmp, *dft_B;
    IplImage * image_Re;
    IplImage * image_Im;
    double m, M;
 
    im = cvLoadImage( filename, CV_LOAD_IMAGE_GRAYSCALE );
    if( !im )
	{
        return -1;
	}
 
    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);
 
    cvScale(im, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);
 
    dft_M = cvGetOptimalDFTSize( im->height - 1 );
    dft_N = cvGetOptimalDFTSize( im->width - 1 );
	dft_B = cvCreateMat( dft_M, dft_N, CV_64FC2 );
    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
	cvZero(dft_B);


    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
 
    
    cvGetSubRect( dft_A,&tmp, cvRect(0,0, im->width, im->height));
    cvCopy( complexInput, &tmp, NULL );
   
 
   
 
	cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

	ILPF(dft_A,  D0);
	cvDFT( dft_A, dft_A, CV_DXT_INVERSE , complexInput->height );


    cvNamedWindow("win", 0);
    cvNamedWindow("magnitude", 0);
    cvShowImage("win", im);
 
    
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );
 
   
 
    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));




    cvShowImage("magnitude", image_Re);
 
    cvWaitKey(-1);
    return 0;
}


 

// --- Next example: Butterworth low-pass filtering with the OpenCV C++ API ---

// Example : apply butterworth low pass filtering to input image/video
// usage: prog {<image_name> | <video_name>}

// Author : Toby Breckon, toby.breckon@cranfield.ac.uk

// Copyright (c) 2011 School of Engineering, Cranfield University
// License : LGPL - http://www.gnu.org/licenses/lgpl.html

#include <cv.h>   		// open cv general include file
#include <highgui.h>	// open cv GUI include file
#include <iostream>		// standard C++ I/O

using namespace cv; // OpenCV API is in the C++ "cv" namespace

/******************************************************************************/
// setup the cameras properly based on OS platform

// 0 in linux gives first camera for v4l
//-1 in windows gives first device or user dialog selection

#ifdef linux
	#define CAMERA_INDEX 0
#else
	#define CAMERA_INDEX -1
#endif
/******************************************************************************/
// Rearrange the quadrants of a Fourier image so that the origin is at
// the image center

// Rearrange the quadrants of a Fourier image so that the origin (DC term)
// moves from the top-left corner to the image centre (and vice versa —
// the operation is its own inverse).
void shiftDFT(Mat& fImage )
{
	// Crop to even dimensions so the four quadrants tile exactly.
	fImage = fImage(Rect(0, 0, fImage.cols & -2, fImage.rows & -2));

	const int halfW = fImage.cols / 2;
	const int halfH = fImage.rows / 2;

	// The four quadrants of the spectrum.
	Mat topLeft  = fImage(Rect(0,     0,     halfW, halfH));
	Mat topRight = fImage(Rect(halfW, 0,     halfW, halfH));
	Mat botLeft  = fImage(Rect(0,     halfH, halfW, halfH));
	Mat botRight = fImage(Rect(halfW, halfH, halfW, halfH));

	// Swap each pair of diagonally-opposite quadrants via a scratch buffer.
	Mat swapBuf;
	topLeft.copyTo(swapBuf);
	botRight.copyTo(topLeft);
	swapBuf.copyTo(botRight);

	topRight.copyTo(swapBuf);
	botLeft.copyTo(topRight);
	swapBuf.copyTo(botLeft);
}

/******************************************************************************/
// return a floating point spectrum magnitude image scaled for user viewing
// complexImg- input dft (2 channel floating point, Real + Imaginary fourier image)
// rearrange - perform rearrangement of DFT quadrants if true

// return value - pointer to output spectrum magnitude image scaled for user viewing

// Build a floating-point magnitude-spectrum image scaled for display.
// complexImg - 2-channel floating-point DFT output (real + imaginary planes)
// rearrange  - if true, shift quadrants so the DC term is centred
// Returns log(1 + |F(u,v)|) normalized to [0, 1].
Mat create_spectrum_magnitude_display(Mat& complexImg, bool rearrange)
{
    // Split into real/imaginary planes and take the per-pixel magnitude.
    Mat planes[2];
    split(complexImg, planes);
    magnitude(planes[0], planes[1], planes[0]);

    // log(1 + |F|) compresses the spectrum's huge dynamic range for viewing.
    Mat mag = planes[0].clone();
    mag += Scalar::all(1);
    log(mag, mag);

    if (rearrange)
    {
        shiftDFT(mag); // put the DC component at the image centre
    }

    // Stretch to [0, 1] so imshow() renders the full range.
    normalize(mag, mag, 0, 1, CV_MINMAX);

    return mag;
}
/******************************************************************************/

// create a 2-channel butterworth low-pass filter with radius D, order n
// (assumes pre-allocated dft_Filter; its size specifies the filter dimensions)

// Fill dft_Filter with a 2-channel Butterworth low-pass filter
//   H(u,v) = 1 / (1 + (D(u,v)/D0)^(2n))
// centred in the (quadrant-shifted) spectrum.
// dft_Filter - pre-allocated; its dimensions define the filter size
// D          - cutoff radius D0 (pixels)
// n          - filter order
void create_butterworth_lowpass_filter(Mat &dft_Filter, int D, int n)
{
	Mat tmp = Mat(dft_Filter.rows, dft_Filter.cols, CV_32F);

	// Spectrum centre: cy indexes rows (previously misleadingly stored as
	// Point.x), cx indexes columns.
	const double cy = dft_Filter.rows / 2;
	const double cx = dft_Filter.cols / 2;

	// BUG FIX: the Radius trackbar can reach 0, which made radius/D evaluate
	// 0/0 = NaN at the centre pixel. Clamp the cutoff to a tiny positive
	// value so a zero setting degenerates to an (almost) all-blocking filter.
	const double cutoff = (D > 0) ? (double)D : 1e-6;

	// based on the formula in the IP notes (p. 130 of 2009/10 version)
	// see also HIPR2 on-line

	for(int i = 0; i < dft_Filter.rows; i++)
	{
		for(int j = 0; j < dft_Filter.cols; j++)
		{
			// Plain multiplication avoids pow() on an int base (ambiguous
			// overload on some compilers) and is faster for squaring.
			const double dy = i - cy;
			const double dx = j - cx;
			const double radius = sqrt(dy * dy + dx * dx);
			tmp.at<float>(i,j) = (float)
						( 1 / (1 + pow(radius / cutoff, (double) (2 * n))));
		}
	}

    Mat toMerge[] = {tmp, tmp};
	merge(toMerge, 2, dft_Filter);
}

/******************************************************************************/

// Demo: apply an adjustable Butterworth low-pass filter, in the frequency
// domain, to an image file, a video file, or a live camera stream.
// Displays the grayscale input, the log-magnitude spectrum, the filtered
// output, and the filter itself; 'x' exits. Returns 0 on success, -1 if no
// input source could be opened.
int main( int argc, char** argv )
{

  Mat img, imgGray, imgOutput;	// image object(s)
  VideoCapture cap; // capture object

  Mat padded;		// fourier image objects and arrays
  Mat complexImg, filter, filterOutput;
  Mat planes[2], mag;

  int N, M; // fourier image sizes

  int radius = 30;				// low pass filter parameter
  int order = 2;				// low pass filter parameter

  const string originalName = "Input Image (grayscale)"; // window name
  const string spectrumMagName = "Magnitude Image (log transformed)"; // window name
  const string lowPassName = "Butterworth Low Pass Filtered (grayscale)"; // window name
  const string filterName = "Filter Image"; // window name

  bool keepProcessing = true;	// loop control flag
  int  key;						// user input
  int  EVENT_LOOP_DELAY = 40;	// delay for GUI window
                                // 40 ms equates to 1000ms/25fps = 40ms per frame

  // if command line arguments are provided try to read image/video_name
  // otherwise default to capture from attached H/W camera

    if(
	  ( argc == 2 && (!(img = imread( argv[1], CV_LOAD_IMAGE_COLOR)).empty()))||
	  ( argc == 2 && (cap.open(argv[1]) == true )) ||
	  ( argc != 2 && (cap.open(CAMERA_INDEX) == true))
	  )
    {
      // create window object (use flag=0 to allow resize, 1 to auto fix size)

      namedWindow(originalName, 0);
	  namedWindow(spectrumMagName, 0);
	  namedWindow(lowPassName, 0);
      namedWindow(filterName, 0);

        // if capture object in use (i.e. video/camera)
        // grab one frame up-front so that img has valid dimensions for the
        // DFT-size computation below (note: for a video file this consumes
        // the first frame before the main loop starts)

		  if (cap.isOpened()) {

			  cap >> img;
			  if(img.empty()){
				if (argc == 2){
					std::cerr << "End of video file reached" << std::endl;
				} else {
					std::cerr << "ERROR: cannot get next fram from camera"
						      << std::endl;
				}
				exit(0);
			  }

		   }

      // setup the DFT image sizes (padded up to fast-transform sizes)

      M = getOptimalDFTSize( img.rows );
      N = getOptimalDFTSize( img.cols );

      // add adjustable trackbar for low pass filter threshold parameter
      // (trackbars write directly into radius / order each event loop pass)

      createTrackbar("Radius", lowPassName, &radius, (min(M, N) / 2));
	  createTrackbar("Order", lowPassName, &order, 10);

	  // start main loop

	  while (keepProcessing) {

		  // if capture object in use (i.e. video/camera)
		  // get image from capture object

		  if (cap.isOpened()) {

			  cap >> img;
			  if(img.empty()){
				if (argc == 2){
					std::cerr << "End of video file reached" << std::endl;
				} else {
					std::cerr << "ERROR: cannot get next fram from camera"
						      << std::endl;
				}
				exit(0);
			  }

		  }

		  // ***

		    // convert input to grayscale

		  	cvtColor(img, imgGray, CV_BGR2GRAY);

			// setup the DFT images: zero-pad to (M x N) and build a
			// 2-channel complex image (real = pixels, imaginary = 0)

	  		copyMakeBorder(imgGray, padded, 0, M - imgGray.rows, 0,
			      N - imgGray.cols, BORDER_CONSTANT, Scalar::all(0));
	  		planes[0] = Mat_<float>(padded);
		    planes[1] = Mat::zeros(padded.size(), CV_32F);

	  		merge(planes, 2, complexImg);

			// do the DFT

		    dft(complexImg, complexImg);

		    // construct the filter (same size as complex image) — rebuilt
		    // every frame so trackbar changes take effect immediately

		    filter = complexImg.clone();
		    create_butterworth_lowpass_filter(filter, radius, order);

		    // apply filter: shift DC to centre (where the filter is centred),
		    // multiply spectra element-wise, then shift back

		    shiftDFT(complexImg);
            mulSpectrums(complexImg, filter, complexImg, 0);
            shiftDFT(complexImg);

			// create magnitude spectrum for display

		    mag = create_spectrum_magnitude_display(complexImg, true);

            // do inverse DFT on filtered image

            idft(complexImg, complexImg);

            // split into planes and extract plane 0 as output image
            // (normalization to [0,1] also absorbs the missing idft scaling)

            split(complexImg, planes);
            normalize(planes[0], imgOutput, 0, 1, CV_MINMAX);

            // do the same with the filter image

            split(filter, planes);
            normalize(planes[0], filterOutput, 0, 1, CV_MINMAX);

		  // ***

		  // display image in window

		  imshow(originalName, imgGray);
		  imshow(spectrumMagName, mag);
		  imshow(lowPassName, imgOutput);
		  imshow(filterName, filterOutput);

		  // start event processing loop (very important,in fact essential for GUI)
	      // 40 ms roughly equates to 1000ms/25fps = 4ms per frame

		  key = waitKey(EVENT_LOOP_DELAY);

		  if (key == 'x'){

	   		// if user presses "x" then exit

			  	std::cout << "Keyboard exit requested : exiting now - bye!"
				  		  << std::endl;
	   			keepProcessing = false;
		  }
	  }

	  // the camera will be deinitialized automatically in VideoCapture destructor

      // all OK : main returns 0

      return 0;
    }

    // not OK : main returns -1

    return -1;
}
/******************************************************************************/


MATLAB implementation of the same low-pass filters (ideal / Butterworth / Gaussian):

function [H,D]=lpfilter(type,M,N,D0,n) 
%LPFILTER Computes frequency domain lowpass filters. 
% [H,D]=LPFILTER(TYPE,M,N,D0,n) creates the transfer function of a lowpass 
% filter, H, of the specified TYPE and size (M-by-N). To view the filter as 
% an image or mesh plot, it should be centered using H=fftshift(H). 

% valid values for TYPE,D0, and n are: 

% 'ideal'  Ideal lowpass filter with cutoff frequency D0. n need not be 
%          supplied. D0 must be positive. 

%  'btw'   Butterworth lowpass filter of order n, and cutoff D0. The 
%          default value for n is 1.0. D0 must be positive. 

%  'gaussian'  Gaussian lowpass filter with cutoff(standard deviation) D0. 
%              n need not be supplied. D0 must be positive. 
%Use function dftuv to set up the meshgrid arrays needed for computing the 
%required distances. 
[U,V]=dftuv(M,N); 
 
%compute the distances D(U,V) from the origin (unshifted spectrum) 
D=sqrt(U.^2+V.^2); 
 
%Begin filter computation. 
switch type 
    case 'ideal' 
        H=double(D<=D0); 
    case 'btw' 
        if nargin==4 
            n=1; 
        end 
        H=1./(1+(D./D0).^(2*n)); 
    case 'gaussian' 
        H=exp(-(D.^2)./(2*(D0^2))); 
    otherwise 
        % Fixed error-message typo ('Unknow' -> 'Unknown'). 
        error('Unknown filter type'); 
end 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值