c++ - Unhandled exception when using cvCreateSeq

I am using the cvCreateSeq function to create a sequence in OpenCV, and an exception occurs while debugging, such as "Unhandled exception at 0x7c812afb". I am coding in C, and the IDE is Visual C++ 2010 Express Edition.

Can anyone tell me why this exception occurs?

Thanks.

The code is:

#include <stdio.h>
#include <cv.h>
#include <highgui.h>

/* Prototypes inferred from the calls below; the definitions of the two
   fnXxx helpers are not shown in the question. */
void fnShowImageInWindow(char * windowName, IplImage * image);
void fnReleasingMemoryOfWindow(char * windowName, IplImage * image);
void computeCococlust(char * filepath);

int main()
{
    char * file = "D:\\testImage.jpg";
    computeCococlust(file);
    return 0;
}

void computeCococlust(char * filepath)
{
    CvMemStorage * storageContour = NULL;
    CvSeq * first_contour = NULL;
    IplImage * iOriginal = NULL;

    /* Load the image */
    iOriginal = cvLoadImage(filepath, CV_LOAD_IMAGE_UNCHANGED);

    /* Load a grayscale copy of the image */
    IplImage * iGray = cvLoadImage(filepath, CV_LOAD_IMAGE_GRAYSCALE);

    /* Show the original image in a window named 'Original Image' */
    fnShowImageInWindow("Original Image", iOriginal);

    /* Show the gray-level image in a window named 'GrayLevel Image' */
    fnShowImageInWindow("GrayLevel Image", iGray);
    fnReleasingMemoryOfWindow("GrayLevel Image", iGray);

    /*------ Split the iOriginal image into single-channel blue, green and red images ------*/
    IplImage * iRed   = cvCreateImage(cvGetSize(iOriginal), iOriginal->depth, 1);
    IplImage * iGreen = cvCreateImage(cvGetSize(iOriginal), iOriginal->depth, 1);
    IplImage * iBlue  = cvCreateImage(cvGetSize(iOriginal), iOriginal->depth, 1);
    cvSplit(iOriginal, iBlue, iGreen, iRed, NULL);

    /* Show iRed, iGreen, iBlue in windows */
    fnShowImageInWindow("Red Component Image", iRed);
    fnShowImageInWindow("Green Component Image", iGreen);
    fnShowImageInWindow("Blue Component Image", iBlue);
    /*---------------------------------------------------------------------------------*/

    /*-------- Perform Canny edge detection on each channel ---------------------------*/
    /* For the red component */
    IplImage * eRed = cvCreateImage(cvGetSize(iRed), iRed->depth, 1);
    cvCanny(iRed, eRed, 10, 100, 3);

    /* For the green component */
    IplImage * eGreen = cvCreateImage(cvGetSize(iGreen), iGreen->depth, 1);
    cvCanny(iGreen, eGreen, 10, 100, 3);

    /* For the blue component */
    IplImage * eBlue = cvCreateImage(cvGetSize(iBlue), iBlue->depth, 1);
    cvCanny(iBlue, eBlue, 10, 100, 3);

    /*----- Show eRed, eGreen, eBlue in windows ----------------------------------------*/
    fnShowImageInWindow("Red Component Edge Image", eRed);
    fnShowImageInWindow("Green Component Edge Image", eGreen);
    fnShowImageInWindow("Blue Component Edge Image", eBlue);
    /*---------------------------------------------------------------------------------*/

    /*----- Union of the edge images using cvMax ---------------------------------------*/
    IplImage * iMaxTmp = cvCreateImage(cvGetSize(iOriginal), iOriginal->depth, 1);
    IplImage * iUnionImage = cvCreateImage(cvGetSize(iOriginal), iOriginal->depth, 1);
    cvMax(eRed, eGreen, iMaxTmp);
    cvMax(iMaxTmp, eBlue, iUnionImage);
    fnShowImageInWindow("union of all images", iUnionImage);
    /*---------------------------------------------------------------------------------*/

    /*----- Get the boundary pixels of each connected component ------------------------*/
    storageContour = cvCreateMemStorage(0);
    int numContour = cvFindContours(iUnionImage, storageContour, &first_contour,
                                    sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);
    printf("Total contours detected: %d\n", numContour);

    CvMemStorage * storage = cvCreateMemStorage(0);
    /*---------------------------------------------------------------------------------*/

    /*----- Smooth each contour with a 5-point moving average --------------------------*/
    for (CvSeq * seqSmooth = first_contour; seqSmooth != NULL; seqSmooth = seqSmooth->h_next)
    {
        CvSeq * newSeq = cvCreateSeq(CV_32FC1, sizeof(CvSeq), sizeof(CvPoint), storage);

        for (int i = 3; i < seqSmooth->total - 2; i++)
        {
            CvPoint * ps1 = CV_GET_SEQ_ELEM(CvPoint, seqSmooth, i - 2);
            CvPoint * ps2 = CV_GET_SEQ_ELEM(CvPoint, seqSmooth, i - 1);
            CvPoint * ps3 = CV_GET_SEQ_ELEM(CvPoint, seqSmooth, i);
            CvPoint * ps4 = CV_GET_SEQ_ELEM(CvPoint, seqSmooth, i + 1);
            CvPoint * ps5 = CV_GET_SEQ_ELEM(CvPoint, seqSmooth, i + 2);

            CvPoint newPoint = cvPoint((ps1->x + ps2->x + ps3->x + ps4->x + ps5->x) / 5,
                                       (ps1->y + ps2->y + ps3->y + ps4->y + ps5->y) / 5);
            cvSeqPush(newSeq, &newPoint);
        }

        /* Swap the smoothed sequence's memory block into storageContour */
        if (storageContour->bottom->prev != NULL)
        {
            CvMemBlock * oldNext = storageContour->bottom->next;
            CvMemBlock * oldPrev = storageContour->bottom->prev;
            storageContour->bottom = newSeq->storage->bottom;
            storageContour->bottom->next = oldNext;
            storageContour->bottom->prev = oldPrev;
        }
        else
        {
            CvMemBlock * oldNext = storageContour->bottom->next;
            storageContour->bottom = newSeq->storage->bottom;
        }
    }
}
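For reference, cvCreateSeq expects the element size to be consistent with the element type encoded in the sequence flags: CV_32FC1 describes a 4-byte, single-channel float element, while sizeof(CvPoint) is 8 bytes, and OpenCV reports a "bad size" error for such a mismatch, which is one plausible source of the exception above. The following is a minimal, self-contained sketch (not taken from the code above; the point value is a placeholder) of creating a CvPoint sequence with the matching type flag CV_SEQ_ELTYPE_POINT:

#include <stdio.h>
#include <cv.h>

int main(void)
{
    CvMemStorage * storage = cvCreateMemStorage(0);

    /* CV_SEQ_ELTYPE_POINT (an alias for CV_32SC2) matches sizeof(CvPoint),
       so the element type flag and the element size agree. */
    CvSeq * points = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq),
                                 sizeof(CvPoint), storage);

    CvPoint p = cvPoint(10, 20);   /* placeholder point */
    cvSeqPush(points, &p);         /* append one element */

    /* Read the element back with the same macro used in the question */
    CvPoint * q = CV_GET_SEQ_ELEM(CvPoint, points, 0);
    printf("(%d, %d)\n", q->x, q->y);

    cvReleaseMemStorage(&storage); /* also frees every sequence allocated from it */
    return 0;
}

cvSeqPush copies the element into the sequence, so the local CvPoint can safely go out of scope, and releasing the memory storage frees all sequences allocated from it.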
