An edge-based shape matching algorithm implemented with OpenCV

1. Reference: https://www.codeproject.com/Articles/99457/Edge-Based-Template-Matching

A shape matching implementation written with OpenCV, but without rotation or scale support.

A book by the developers of the well-known machine vision package Halcon:

2. Machine Vision Algorithms and Applications [Carsten Steger, Markus Ulrich, Christian Wiedemann]

Published in Chinese translation as 《机器视觉算法与应用》.

Section 3.11 of this book explains the principles behind Halcon's shape matching algorithm.

=========================================================

       Industrial applications often rely on shape matching for part localization. VisionPro's PatMax and Halcon's shape matching are both edge-based template matching algorithms, and Halcon's in particular is robust, stable, accurate, and fast. OpenCV does ship a shape matching function, but it compares contour similarity via the seven Hu invariant moments. Because Hu moments are rotation- and scale-invariant, the result carries no information about the target's rotation angle or scale factor, and shapes whose moments differ little, such as a circle and a square, are hard to tell apart.
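To make that limitation concrete, here is a minimal sketch of OpenCV's built-in Hu-moment matcher, written with the modern C++ API (the code in this article uses the legacy C API; the file names below are placeholders). It returns only a dissimilarity value and recovers neither angle nor scale:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
	// Placeholder images: a binary template and a target shape.
	cv::Mat a = cv::imread("template.png", cv::IMREAD_GRAYSCALE);
	cv::Mat b = cv::imread("target.png", cv::IMREAD_GRAYSCALE);
	cv::threshold(a, a, 128, 255, cv::THRESH_BINARY);
	cv::threshold(b, b, 128, 255, cv::THRESH_BINARY);

	std::vector<std::vector<cv::Point>> ca, cb;
	cv::findContours(a, ca, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
	cv::findContours(b, cb, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
	if (ca.empty() || cb.empty()) return 1;

	// Hu moments are rotation- and scale-invariant, so the score carries
	// no pose information; circle vs. square can also score deceptively close.
	double d = cv::matchShapes(ca[0], cb[0], cv::CONTOURS_MATCH_I1, 0);
	std::cout << "dissimilarity: " << d << std::endl;
	return 0;
}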

The main workflow for implementing shape matching with rotation and scale support is described below; for the detailed theory, consult reference 2. The code for the key parts is given here for reference and study.

1. Building the template

(1). Rotate and scale the template image and downsample it into an image pyramid, generating a set of templates over a range of rotation angles, scale factors, and pyramid levels.

(2). Extract the template's edge points, following the principle of the Canny algorithm.

(3). Compute each edge point's gradient in the x and y directions as well as the overall gradient magnitude.

(4). Store the x and y gradients of each edge point, normalize the gradient magnitude to cancel out uneven illumination (store 1 divided by the gradient magnitude at the point, so all stored values fall in [0,1]), and convert the edge point coordinates into coordinates relative to the center of gravity (a compact sketch of steps (2)-(4) follows this list).

After the steps above, we have built the complete set of templates across rotations, scales, and pyramid levels.
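Before the full legacy-API implementation below, here is a compact sketch of steps (2)-(4) using the C++ API; the struct and function names are illustrative only, not the ones used in this article:

#include <opencv2/opencv.hpp>
#include <vector>
#include <cmath>

struct EdgeModel			// illustrative name, not the article's struct
{
	std::vector<cv::Point> pts;		// edge coordinates relative to the centroid
	std::vector<double>    gx, gy;	// x/y gradients at each edge point
	std::vector<double>    invMag;	// 1/|gradient|, values in [0,1]
};

EdgeModel buildEdgeModel(const cv::Mat& gray, double lo, double hi)
{
	cv::Mat edges, dx, dy;
	cv::Canny(gray, edges, lo, hi);			// step (2): pick edge points
	cv::Sobel(gray, dx, CV_32F, 1, 0, 3);	// step (3): x gradient
	cv::Sobel(gray, dy, CV_32F, 0, 1, 3);	//           y gradient

	EdgeModel m;
	cv::Point sum(0, 0);
	for (int y = 0; y < edges.rows; y++)
		for (int x = 0; x < edges.cols; x++)
			if (edges.at<uchar>(y, x))
			{
				double fx = dx.at<float>(y, x), fy = dy.at<float>(y, x);
				double mag = std::sqrt(fx * fx + fy * fy);
				if (mag == 0) continue;
				m.pts.push_back(cv::Point(x, y));
				m.gx.push_back(fx);
				m.gy.push_back(fy);
				m.invMag.push_back(1.0 / mag);	// step (4): reciprocal magnitude
				sum += cv::Point(x, y);
			}
	if (!m.pts.empty())
	{
		// step (4): shift coordinates so they are relative to the center of gravity
		cv::Point cog(sum.x / (int)m.pts.size(), sum.y / (int)m.pts.size());
		for (size_t k = 0; k < m.pts.size(); k++)
			m.pts[k] -= cog;
	}
	return m;
}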

Header file structure

struct IplImageArr
{
	IplImage * img;
};

// Stores the search image's gradient information at each pyramid scale
struct ImgEdgeInfo
{
	int16_t  *pBufGradX ;
	int16_t  *pBufGradY ;
	float	    *pBufMag;
};
struct PyramidEdgePoints
{
	int     level;
	int	    numOfCordinates;	// number of edge points
	Point   *edgePoints;        // edge point coordinates
	double	*edgeMagnitude;		// reciprocal gradient magnitudes
	double  *edgeDerivativeX;	// gradient in X
	double  *edgeDerivativeY;	// gradient in Y
	Point   centerOfGravity;	// template center of gravity
};
struct AngleEdgePoints
{
	PyramidEdgePoints *pyramidEdgePoints;
	double  templateAngle;

};
struct ScaleEdgePoints
{
	AngleEdgePoints *angleEdgePoints;
	double scaleVale;
};
// Match result
struct MatchResult
{
	int nums;
	double          scale;
	int             level;
	int 			Angel;					// matched angle
	int 			CenterLocX;				// X coordinate of the matched reference point
	int				CenterLocY;				// Y coordinate of the matched reference point
	float 			ResultScore;			// match score
};
// Search region
struct search_region
{
	int 	StartX;											// start of X range
	int 	StartY;											// start of Y range
	int 	EndX;											// end of X range
	int 	EndY;											// end of Y range
};
class ShapeMatch
{
private:
	ScaleEdgePoints* scaleEdgePoints;	// edge models for every scale/angle/pyramid level
	int				modelHeight;		// template image height
	int				modelWidth;			// template image width
	bool			modelDefined;
	Point           gravityPoint;
	void CreateDoubleMatrix(double **&matrix, Size size);
	void ReleaseDoubleMatrix(double **&matrix, int size);
	void rotateImage(IplImage* srcImage, IplImage* dstImage, float Angle);
public:
	ShapeMatch(void);
	float new_rsqrt(float f);
	~ShapeMatch(void);
	int CreateMatchModel(IplImage *templateArr, double maxContrast, double minContrast, int pyramidnums,double anglestart, double angleend,double anglestep,double scalestart,double scaleend, double scalestep);
	int CalEdgeCordinates(IplImage *templateArr, double maxContrast, double minContrast, PyramidEdgePoints *PyramidEdgePtr);
	double FindGeoMatchModel(IplImage* srcarr, double minScore, double greediness, CvPoint *resultPoint, int pyramidnums, double anglestart, double angleend, double anglestep, double scalestart, double scaleend, double scalestep);

	void DrawContours(IplImage* source, CvScalar color, int lineWidth,  Point   *cordinates, Point  centerOfGravity, int noOfCordinates);
	void extract_shape_info(IplImage *ImageData, PyramidEdgePoints *PyramidEdgePtr, int Contrast, int MinContrast);
	void shape_match_accurate(IplImage *SearchImage, PyramidEdgePoints *ShapeInfoVec, int Contrast, int MinContrast, float MinScore, float Greediness, search_region *SearchRegion, MatchResult *ResultList, ImgEdgeInfo *imgEdgeInfo);
	void CalSearchImgEdg(IplImage *SearchImage, ImgEdgeInfo *imgEdgeInfo);
	Point extract_shape_info(IplImage *ImageData, int Contrast, int MinContrast);
};

The template-generation routine is as follows.

int ShapeMatch::CreateMatchModel(IplImage *templateArr, double maxContrast, double minContrast, int pyramidnums, double anglestart, double angleend, double anglestep, double scalestart, double scaleend,double scalestep)
{	
	int scalenum = (int)(fabs(scaleend - scalestart) / scalestep) + 1;
	int anglenum = (int)(fabs(angleend - anglestart) / anglestep) + 1;
	scaleEdgePoints = (ScaleEdgePoints *)malloc(scalenum * sizeof(ScaleEdgePoints));
	// compute the template's center of gravity
	gravityPoint = extract_shape_info(templateArr, maxContrast, minContrast);
	
	for (int i = 0; i < scalenum; i++)
	{
		scaleEdgePoints[i].angleEdgePoints= (AngleEdgePoints *)malloc(anglenum * sizeof(AngleEdgePoints));
		scaleEdgePoints[i].scaleVale = scalestart + i*scalestep;
		AngleEdgePoints *angleEdgePtr = scaleEdgePoints[i].angleEdgePoints;
		for (int j = 0; j < anglenum; j++)
		{
			angleEdgePtr[j].pyramidEdgePoints = (PyramidEdgePoints *)malloc((1+pyramidnums)* sizeof(PyramidEdgePoints));
			angleEdgePtr[j].templateAngle= anglestart + j*anglestep;
			PyramidEdgePoints *pyramidEdgePtr = angleEdgePtr[j].pyramidEdgePoints;
			IplImage * scaleAngleImage= cvCreateImage(cvSize(templateArr->width*(scalestart + i*scalestep), templateArr->height*(scalestart + i*scalestep)), IPL_DEPTH_8U, 1);
			cvResize(templateArr, scaleAngleImage);
			rotateImage(scaleAngleImage, scaleAngleImage, anglestart + j*anglestep);
			IplImage * tempDownImg=cvCreateImage(cvSize(round(scaleAngleImage->width), round(scaleAngleImage->height)), IPL_DEPTH_8U, 1);
			cvCopy(scaleAngleImage, tempDownImg);
			extract_shape_info(tempDownImg, &(pyramidEdgePtr[0]), maxContrast, minContrast );
		
			for (int k = 1; k <= pyramidnums; k++)
			{
				pyramidEdgePtr[k].level = k;
				CvSize size;
				if (tempDownImg->height % 2 == 0)
					size.height = tempDownImg->height >> 1;
				else 
					size.height = floor(tempDownImg->height >> 1)+1;
				if (tempDownImg->width % 2 == 0)
					size.width = tempDownImg->width >> 1;
				else
					size.width = floor(tempDownImg->width >> 1) + 1;
		
				IplImage* pyDownImg = cvCreateImage(size, IPL_DEPTH_8U, 1);
				cvPyrDown(tempDownImg, pyDownImg);

				cvReleaseImage(&tempDownImg);	// avoid leaking the previous level
				tempDownImg = cvCreateImage(cvSize(pyDownImg->width, pyDownImg->height), IPL_DEPTH_8U, 1);
				cvCopy(pyDownImg, tempDownImg);

				extract_shape_info(pyDownImg, &(pyramidEdgePtr[k]), maxContrast, minContrast);
				//DrawContours(pyDownImg, CvScalar(0, 0, 255), 1, pyramidEdgePtr[k].edgePoints, pyramidEdgePtr[k].centerOfGravity, pyramidEdgePtr[k].numOfCordinates);
				cvReleaseImage(&pyDownImg);
			}
			cvReleaseImage(&tempDownImg);
			cvReleaseImage(&scaleAngleImage);
		}
	}
	return 1;
}
// Extract edge (shape) information from an image
void ShapeMatch::extract_shape_info(IplImage *ImageData, PyramidEdgePoints *PyramidEdgePtr, int Contrast, int MinContrast)
{

		/* source image size */
	int width = ImageData->width;
	int height = ImageData->height;
	int widthstep = ImageData->widthStep;
	/* Compute buffer sizes */
	uint32_t  bufferSize = widthstep * height;
	PyramidEdgePtr->numOfCordinates = 0;											//initialize	
	PyramidEdgePtr->edgePoints = new Point[bufferSize];			// Allocate memory for coordinates of selected points in template image

	PyramidEdgePtr->edgeMagnitude = new double[bufferSize];		// Allocate memory for edge magnitude of selected points
	PyramidEdgePtr->edgeDerivativeX = new double[bufferSize];	// Allocate memory for edge X derivative of selected points
	PyramidEdgePtr->edgeDerivativeY = new double[bufferSize];	// Allocate memory for edge Y derivative of selected points

	/* Allocate buffers for each vector */
	uint8_t  *pInput = (uint8_t *)malloc(bufferSize * sizeof(uint8_t));
	uint8_t  *pBufOut = (uint8_t *)malloc(bufferSize * sizeof(uint8_t));
	int16_t  *pBufGradX = (int16_t *)malloc(bufferSize * sizeof(int16_t));
	int16_t  *pBufGradY = (int16_t *)malloc(bufferSize * sizeof(int16_t));
	int32_t	*pBufOrien = (int32_t *)malloc(bufferSize * sizeof(int32_t));
	float	    *pBufMag = (float *)malloc(bufferSize * sizeof(float));


	if (pInput && pBufGradX && pBufGradY && pBufMag && pBufOrien && pBufOut)
	{
		//gaussian_filter(ImageData, pInput, width, height);
		memcpy(pInput, ImageData->imageData, bufferSize * sizeof(uint8_t));
		memset(pBufGradX, 0, bufferSize * sizeof(int16_t));
		memset(pBufGradY, 0, bufferSize * sizeof(int16_t));
		memset(pBufOrien, 0, bufferSize * sizeof(int32_t));
		memset(pBufOut, 0, bufferSize * sizeof(uint8_t));
		memset(pBufMag, 0, bufferSize * sizeof(float));

		float MaxGradient = -9999.99f;
		int count = 0, i, j; // count variable;

		for (i = 1; i < width - 1; i++)
		{
			for (j = 1; j < height - 1; j++)
			{
				int16_t sdx = *(pInput + j*widthstep + i + 1) - *(pInput + j*widthstep + i - 1);
				int16_t sdy = *(pInput + (j + 1)*widthstep + i) - *(pInput + (j - 1)*widthstep + i);
				*(pBufGradX + j*widthstep + i) = sdx;
				*(pBufGradY + j*widthstep + i) = sdy;
				float MagG = sqrt((float)(sdx*sdx) + (float)(sdy*sdy));
				*(pBufMag + j*widthstep + i) = MagG;

				// get maximum gradient value for normalizing.
				if (MagG>MaxGradient)
					MaxGradient = MagG;
			}
		}

		for (i = 1; i < width - 1; i++)
		{
			for (j = 1; j < height - 1; j++)
			{
				int16_t fdx = *(pBufGradX + j*widthstep + i);
				int16_t fdy = *(pBufGradY + j*widthstep + i);

				float direction = cvFastArctan((float)fdy, (float)fdx);	 //Direction = invtan (Gy / Gx)

																		 // get closest angle from 0, 45, 90, 135 set
				if ((direction>0 && direction < 22.5) || (direction >157.5 && direction < 202.5) || (direction>337.5 && direction<360))
					direction = 0;
				else if ((direction>22.5 && direction < 67.5) || (direction >202.5 && direction <247.5))
					direction = 45;
				else if ((direction >67.5 && direction < 112.5) || (direction>247.5 && direction<292.5))
					direction = 90;
				else if ((direction >112.5 && direction < 157.5) || (direction>292.5 && direction<337.5))
					direction = 135;
				else
					direction = 0;

				pBufOrien[count] = (int32_t)direction;
				count++;
			}
		}

		count = 0; // init count
				   // non maximum suppression
		float leftPixel = 0, rightPixel = 0;

		for (i = 1; i < width - 1; i++)
		{
			for (j = 1; j < height - 1; j++)
			{
				switch (pBufOrien[count])
				{
				case 0:
					leftPixel = *(pBufMag + j*widthstep + i - 1);
					rightPixel = *(pBufMag + j*widthstep + i + 1);
					break;
				case 45:
					leftPixel = *(pBufMag + (j - 1)*widthstep + i - 1);
					rightPixel = *(pBufMag + (j + 1)*widthstep + i + 1);
					break;
				case 90:
					leftPixel = *(pBufMag + (j - 1)*widthstep + i);
					rightPixel = *(pBufMag + (j + 1)*widthstep + i);

					break;
				case 135:
					leftPixel = *(pBufMag + (j + 1)*widthstep + i - 1);
					rightPixel = *(pBufMag + (j - 1)*widthstep + i + 1);
					break;
				}
				// compare current pixels value with adjacent pixels
				if ((*(pBufMag + j*widthstep + i) < leftPixel) || (*(pBufMag + j*widthstep + i) < rightPixel))
				{
					*(pBufOut + j*widthstep + i) = 0;
				}
				else
					*(pBufOut + j*widthstep + i) = (uint8_t)(*(pBufMag + j*widthstep + i) / MaxGradient * 255);

				count++;
			}
		}
		int RSum = 0, CSum = 0;
		int curX, curY;
		int flag = 1;
		int n = 0;
		int iPr = 1;
		//Hysteresis threshold
		for (i = 1; i < width - 1; i += iPr)
		{
			for (j = 1; j < height - 1; j += iPr)
			{
				int16_t fdx = *(pBufGradX + j*widthstep + i);
				int16_t fdy = *(pBufGradY + j*widthstep + i);
				float MagG = *(pBufMag + j*widthstep + i);

				flag = 1;
				if ((float)*(pBufOut + j*widthstep + i) < Contrast)
				{
					if ((float)*(pBufOut + j*widthstep + i) < MinContrast)
					{
						*(pBufOut + j*widthstep + i) = 0;
						flag = 0; // remove from edge
					}
					else
					{   // if none of the 8 neighboring pixels exceeds the high threshold, remove from edge
						if (((float)*(pBufOut + (j - 1)*widthstep + i - 1) < Contrast) &&
							((float)*(pBufOut + j     * widthstep + i - 1) < Contrast) &&
							((float)*(pBufOut + (j + 1) * widthstep + i - 1) < Contrast) &&
							((float)*(pBufOut + (j - 1) * widthstep + i) < Contrast) &&
							((float)*(pBufOut + (j + 1)* widthstep + i) < Contrast) &&
							((float)*(pBufOut + (j - 1) * widthstep + i + 1) < Contrast) &&
							((float)*(pBufOut + j     * widthstep + i + 1) < Contrast) &&
							((float)*(pBufOut + (j + 1)  * widthstep + i + 1) < Contrast))
						{
							*(pBufOut + j*widthstep + i) = 0;
							flag = 0;
						}
					}
				}

				// save selected edge information
				curX = i;	curY = j;
				if (flag != 0)
				{
					if (fdx != 0 || fdy != 0)
					{
						RSum = RSum + curX;
						CSum = CSum + curY; // Row sum and column sum for center of gravity

						PyramidEdgePtr->edgePoints[n].x = curX;
						PyramidEdgePtr->edgePoints[n].y = curY;
						PyramidEdgePtr->edgeDerivativeX[n] = fdx;
						PyramidEdgePtr->edgeDerivativeY[n] = fdy;

						//handle divide by zero
						if (MagG != 0)
							PyramidEdgePtr->edgeMagnitude[n] = 1 / MagG;  // store reciprocal of gradient magnitude (normalization)
						else
							PyramidEdgePtr->edgeMagnitude[n] = 0;
						n++;
					}
				}
			}
		}
		if (n != 0)
		{
			PyramidEdgePtr->numOfCordinates = n;
			PyramidEdgePtr->centerOfGravity.x = RSum / n;			 // center of gravity
			PyramidEdgePtr->centerOfGravity.y = CSum / n;			 // center of gravity
			//PyramidEdgePtr->centerOfGravity.x = width / 2;			 // center of image
			//PyramidEdgePtr->centerOfGravity.y = height / 2;		     // center of image
		}
		// change coordinates to reflect center of reference
		int m, temp;
		for (m = 0; m < PyramidEdgePtr->numOfCordinates; m++)
		{
			temp = (PyramidEdgePtr->edgePoints + m)->x;
			(PyramidEdgePtr->edgePoints + m)->x = temp - PyramidEdgePtr->centerOfGravity.x;
			temp = (PyramidEdgePtr->edgePoints + m)->y;
			(PyramidEdgePtr->edgePoints + m)->y = temp - PyramidEdgePtr->centerOfGravity.y;
		}
	}

	free(pBufMag);
	free(pBufOrien);
	free(pBufGradY);
	free(pBufGradX);
	free(pBufOut);
	free(pInput);
}
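A hypothetical snippet to exercise extract_shape_info on its own and visualize the selected edge points (the file name and the 80/20 hysteresis thresholds are assumptions):

	IplImage* tpl = cvLoadImage("template.png", CV_LOAD_IMAGE_GRAYSCALE);	// placeholder file
	PyramidEdgePoints edgeModel;
	ShapeMatch sm;
	sm.extract_shape_info(tpl, &edgeModel, 80, 20);		// high/low contrast thresholds
	// Edge points are stored relative to the centroid, so draw them back around it:
	sm.DrawContours(tpl, CV_RGB(255, 0, 0), 1, edgeModel.edgePoints,
		edgeModel.centerOfGravity, edgeModel.numOfCordinates);
	cvSaveImage("template_edges.png", tpl);
	cvReleaseImage(&tpl);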

2. Template matching

(1). Compute the gradient information at the search image's edge points, as in template-building steps (2) and (3).

(2). Downsample the search image into a pyramid and apply (1) to every level, yielding edge gradient information for each pyramid level.

(3). The crucial step: score the similarity between the template's edge gradients and the search image's edge gradient vectors using normalized cross-correlation (NCC).

The NCC value itself serves as the match score, ranging over [0,1]; see the code for the details. In essence, each of the pre-generated templates has its center of gravity translated across the search image, and at every position the correlation between the gradient vectors at corresponding edge points is computed. The highest-scoring position is the center of gravity of the matched shape, and the rotation and scale of the template that produced it give the rotation and scale of the target in the search image.

The pyramid is there to accelerate the search: a fast match on the top pyramid level yields a coarse position, matching on the next level down then only searches an ROI around that position, and so on until the bottom level, which speeds matching up considerably. A stopping criterion provides a further speedup: if the running score while accumulating edge-point gradient similarities drops too low, the remaining edge points are skipped and the window moves straight to the next position.
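shape_match_accurate itself is not reproduced in this article; its scoring kernel, following the CodeProject reference in resource 1, looks roughly like the sketch below. Here tpl stands for the current template's edge model, and imgGradX / imgGradY / imgInvMag for the precomputed search-image gradients and reciprocal magnitudes; all of these names are placeholders:

	// n = number of template edge points; W, H = search image size;
	// (x0,y0)-(x1,y1) = the search region for the current pyramid level.
	double normMinScore   = minScore / n;
	double normGreediness = ((1 - greediness * minScore) / (1 - greediness)) / n;
	double bestScore = 0;
	CvPoint bestPos = cvPoint(0, 0);
	for (int v = y0; v <= y1; v++)
	{
		for (int u = x0; u <= x1; u++)
		{
			double partialSum = 0;
			for (int m = 0; m < n; m++)
			{
				// edge coordinates are centroid-relative, so offset by (u,v)
				int cx = u + tpl.pts[m].x, cy = v + tpl.pts[m].y;
				if (cx < 0 || cy < 0 || cx >= W || cy >= H) continue;
				double sx = imgGradX[cy * W + cx], sy = imgGradY[cy * W + cx];
				if (sx != 0 || sy != 0)
					partialSum += (sx * tpl.gx[m] + sy * tpl.gy[m])
						* tpl.invMag[m] * imgInvMag[cy * W + cx];	// NCC term
				// greediness early-out: abandon this position as soon as even a
				// perfect remainder could no longer reach minScore
				double partialScore = partialSum / (m + 1);
				if (partialScore < MIN((minScore - 1) + normGreediness * (m + 1),
					normMinScore * (m + 1)))
					break;
			}
			double score = partialSum / n;
			if (score > bestScore)
			{
				bestScore = score;
				bestPos = cvPoint(u, v);	// candidate match position (centroid)
			}
		}
	}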

Main routine of the matching stage:

double ShapeMatch::FindGeoMatchModel(IplImage* srcarr, double minScore, double greediness, CvPoint *resultPoint, int pyramidnums, double anglestart, double angleend, double anglestep, double scalestart, double scaleend, double scalestep)
{
	if (srcarr == NULL)
		return -1;
	CvSize srcImgSize = cvSize(srcarr->width, srcarr->height);
	IplImage* grayImg = cvCreateImage(srcImgSize, IPL_DEPTH_8U, 1);

	// Convert color image to gray image.
	if (srcarr->nChannels == 3)
	{
		cvCvtColor(srcarr, grayImg, CV_RGB2GRAY);
	}
	else
	{
		cvCopy(srcarr, grayImg);
	}
	double resultScore = 0;
	double maxScore=0;
	int maxScoreId=0;
	PyramidEdgePoints *matchEdgePoints=new PyramidEdgePoints;
	double partialSum = 0;
	double sumOfCoords = 0;
	double partialScore;
	CvSize Ssize;
	CvPoint tempMatchPoint = cvPoint(0, 0);
	AngleEdgePoints *angleEdgePtr;
	PyramidEdgePoints *pyramidEdgePtr;
	int scalenum = (int)(fabs(scaleend - scalestart) / scalestep) + 1;
	int anglenum = (int)(fabs(angleend - anglestart) / anglestep) + 1;
	ImgEdgeInfo *imgEdgeInfo= (ImgEdgeInfo *)malloc((pyramidnums + 1) * sizeof(ImgEdgeInfo));

	IplImageArr  *pyDownImgArr= (IplImageArr *)malloc((pyramidnums+1) * sizeof(IplImageArr));
	IplImage * tempDownImg = cvCreateImage(cvSize(grayImg->width, grayImg->height), IPL_DEPTH_8U, 1);
	cvCopy(grayImg, tempDownImg);
	pyDownImgArr[0].img = cvCreateImage(cvSize(grayImg->width, grayImg->height), IPL_DEPTH_8U, 1);
	cvCopy(grayImg, pyDownImgArr[0].img);
	CalSearchImgEdg(tempDownImg, &(imgEdgeInfo[0]));
	for (int i=1;i<=pyramidnums;i++)
	{
		CvSize size;
		if (tempDownImg->height % 2 == 0)
			size.height = tempDownImg->height >> 1;
		else
			size.height = floor(tempDownImg->height >> 1) + 1;
		if (tempDownImg->width % 2 == 0)
			size.width = tempDownImg->width >> 1;
		else
			size.width = floor(tempDownImg->width >> 1) + 1;
		//CvSize size = cvSize(floor(tempDownImg->height>>1), floor(tempDownImg->width>>1));///
		IplImage* pyDownImg = cvCreateImage(size, IPL_DEPTH_8U, 1);
		pyDownImgArr[i].img= cvCreateImage(size, IPL_DEPTH_8U, 1);
		cvPyrDown(tempDownImg, pyDownImg);
		cvReleaseImage(&tempDownImg);
		tempDownImg = cvCreateImage(cvSize(pyDownImg->width, pyDownImg->height), IPL_DEPTH_8U, 1);
		cvCopy(pyDownImg, tempDownImg);
		cvCopy(pyDownImg, pyDownImgArr[i].img);
		CalSearchImgEdg(tempDownImg, &(imgEdgeInfo[i]));
		cvReleaseImage(&pyDownImg);
		/*cvNamedWindow("Search Image", 0);
		cvShowImage("Search Image", tempDownImg);
		cvWaitKey(0);*/
		//cvSaveImage("tempimg.png", tempDownImg);
	}
   // #pragma omp parallel for
	MatchResult *ResultList = new MatchResult;
	MatchResult *ResultLists = new MatchResult[9999];
	int matcnnums = 0;
	search_region *SearchRegion = new search_region;
	for (int ii = 0; ii < scalenum; ii++)
	{
		angleEdgePtr = scaleEdgePoints[ii].angleEdgePoints;
		for (int jj = 0; jj < anglenum; jj++)
		{
			pyramidEdgePtr = angleEdgePtr[jj].pyramidEdgePoints;
			
			ResultList->CenterLocX = 0;
			ResultList->CenterLocY = 0;
			
			SearchRegion->EndX = pyDownImgArr[pyramidnums].img->width-1; SearchRegion->EndY = pyDownImgArr[pyramidnums].img->height - 1;
			SearchRegion->StartX = 1; SearchRegion->StartY = 1;
			for (int kk = pyramidnums; kk >= 0; kk--)
			{
				ResultList->CenterLocX = 0;
				ResultList->CenterLocY = 0;
				shape_match_accurate(pyDownImgArr[kk].img, &(pyramidEdgePtr[kk]), 80, 20,	// TODO: the 80/20 contrast thresholds should be parameters
					minScore, greediness, SearchRegion, ResultList, &(imgEdgeInfo[kk]));
				if (ResultList->CenterLocX == 0 || ResultList->CenterLocY == 0)
				{
					break;
				}
				else
				{
					SearchRegion->StartX = ResultList->CenterLocX*2 - 6;
					SearchRegion->StartY = ResultList->CenterLocY *2 - 6;
					SearchRegion->EndX = ResultList->CenterLocX *2 +6;
					SearchRegion->EndY = ResultList->CenterLocY * 2 + 6;
					resultScore = ResultList->ResultScore;
				}
			}
			if (resultScore > minScore && matcnnums < 9999)
			{
				if (resultScore > maxScore)
				{
					maxScore = resultScore;
					maxScoreId = matcnnums;
					matchEdgePoints = &(pyramidEdgePtr[0]);	// keep a pointer to the best-matching template's edge model
				}
				ResultLists[matcnnums].ResultScore = resultScore;
				ResultLists[matcnnums].CenterLocX = ResultList->CenterLocX;
				ResultLists[matcnnums].CenterLocY = ResultList->CenterLocY;
				ResultLists[matcnnums].scale = scaleEdgePoints[ii].scaleVale;
				ResultLists[matcnnums].Angel = angleEdgePtr[jj].templateAngle;
				ResultLists[matcnnums].nums = matcnnums;
				matcnnums++;
			}
		}
	}
	if (matcnnums > 0)
	{
		resultPoint->x = ResultLists[maxScoreId].CenterLocX; resultPoint->y = ResultLists[maxScoreId].CenterLocY;
	}
	//if (matcnnums > 0)
	//{
	//	cout << "Best match ------------------------------------" << endl;
	//	cout << "score: " << ResultLists[maxScoreId].ResultScore << endl;
	//	cout << "x: " << ResultLists[maxScoreId].CenterLocX << endl;
	//	cout << "y: " << ResultLists[maxScoreId].CenterLocY << endl;
	//	cout << "scale: " << ResultLists[maxScoreId].scale << endl;
	//	cout << "angle: " << ResultLists[maxScoreId].Angel << endl;
	//	cout << endl;
	//}

	if (matcnnums > 0)
	{
		DrawContours(srcarr, CvScalar(0, 0, 255), 1, matchEdgePoints->edgePoints, Point(ResultLists[maxScoreId].CenterLocX, ResultLists[maxScoreId].CenterLocY), matchEdgePoints->numOfCordinates);
	}
	cvNamedWindow("Search Image", 0);
	cvShowImage("Search Image", srcarr);
	cvWaitKey(100);
	//
	//cvDestroyWindow("Search Image");
	//cvReleaseImage(&srcarr);
	delete ResultList; ResultList = NULL;
	delete []ResultLists; ResultLists = NULL;
	delete SearchRegion; SearchRegion = NULL;
	//delete matchEdgePoints;	// points into scaleEdgePoints, so it must not be deleted here

	// Free the per-level gradient buffers and pyramid images for all pyramidnums+1 levels.
	for (int i = 0; i <= pyramidnums; i++)
	{
		free(imgEdgeInfo[i].pBufGradX); imgEdgeInfo[i].pBufGradX = NULL;
		free(imgEdgeInfo[i].pBufGradY); imgEdgeInfo[i].pBufGradY = NULL;
		free(imgEdgeInfo[i].pBufMag);   imgEdgeInfo[i].pBufMag = NULL;
		cvReleaseImage(&(pyDownImgArr[i].img));
	}
	free(imgEdgeInfo); imgEdgeInfo = NULL;
	free(pyDownImgArr); pyDownImgArr = NULL;
	cvReleaseImage(&grayImg); 
	cvReleaseImage(&tempDownImg);
	return resultScore;

}
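A hypothetical end-to-end call sequence (image files and parameter values are assumptions, not taken from the repository):

#include "ShapeMatch.h"		// assumed header name
#include <cstdio>

int main()
{
	ShapeMatch sm;
	IplImage* tpl = cvLoadImage("template.png", CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* src = cvLoadImage("search.png", CV_LOAD_IMAGE_COLOR);

	// 3 pyramid levels, angles -30..30 deg in 1-deg steps, scales 0.8..1.2 in 0.1 steps
	sm.CreateMatchModel(tpl, 80, 20, 3, -30, 30, 1, 0.8, 1.2, 0.1);

	CvPoint result = cvPoint(0, 0);
	double score = sm.FindGeoMatchModel(src, 0.7, 0.8, &result,
		3, -30, 30, 1, 0.8, 1.2, 0.1);
	printf("score %.3f at (%d, %d)\n", score, result.x, result.y);

	cvReleaseImage(&tpl);
	cvReleaseImage(&src);
	return 0;
}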

The code above is provided for reference only; the parts that can be improved are left to the reader. GitHub: https://github.com/zhouqun92/ShapeMatch
