Some structures in OpenCV (CvSize, ...)

CvPoint

A point in 2D coordinates, with integer members.

typedef struct CvPoint { int x; int y; } CvPoint;
inline CvPoint cvPoint( int x, int y );
inline CvPoint cvPointFrom32f( CvPoint2D32f point );
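As an illustration (a minimal sketch, not from the original text), cvPoint is typically used to build the integer coordinates passed to the drawing functions, e.g. cvLine:

#include "cv.h"

int main( void )
{
    IplImage* img = cvCreateImage( cvSize(200, 200), IPL_DEPTH_8U, 3 );
    cvZero( img );

    CvPoint p1 = cvPoint( 20, 20 );
    CvPoint p2 = cvPoint( 180, 180 );

    /* draw a white line from p1 to p2; all arguments are spelled out because C has no default arguments */
    cvLine( img, p1, p2, cvScalar(255, 255, 255, 0), 1, 8, 0 );

    cvReleaseImage( &img );
    return 0;
}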


CvPoint2D32f
A point in 2D coordinates, with floating-point members.

typedef struct CvPoint2D32f { float x; float y; } CvPoint2D32f;
inline CvPoint2D32f cvPoint2D32f( double x, double y );
inline CvPoint2D32f cvPointTo32f( CvPoint point );
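A small sketch (assuming the legacy "cv.h" header) of converting between the integer and floating-point point types; cvPointTo32f simply casts, while cvPointFrom32f rounds with cvRound:

#include "cv.h"
#include <stdio.h>

int main( void )
{
    CvPoint ip = cvPoint( 10, 20 );
    CvPoint2D32f fp = cvPointTo32f( ip );    /* (10.0f, 20.0f) */

    fp.x += 0.6f;                            /* sub-pixel shift */
    CvPoint back = cvPointFrom32f( fp );     /* cvRound(10.6) -> 11 */

    printf( "(%f, %f) -> (%d, %d)\n", fp.x, fp.y, back.x, back.y );
    return 0;
}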

CvPoint3D32f 
A point in 3D coordinates, with floating-point members.
typedef struct CvPoint3D32f { float x; float y; float z; } CvPoint3D32f;
inline CvPoint3D32f cvPoint3D32f( double x, double y, double z );
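For completeness, a tiny illustrative example (not from the original text); arrays of CvPoint3D32f are what 3D routines such as cvCreatePOSITObject consume, but here we only compute a distance:

#include "cv.h"
#include <math.h>
#include <stdio.h>

int main( void )
{
    CvPoint3D32f a = cvPoint3D32f( 0.0, 0.0, 0.0 );
    CvPoint3D32f b = cvPoint3D32f( 1.0, 2.0, 2.0 );

    /* Euclidean distance between the two 3D points */
    float d = (float)sqrt( (b.x - a.x)*(b.x - a.x) +
                           (b.y - a.y)*(b.y - a.y) +
                           (b.z - a.z)*(b.z - a.z) );
    printf( "distance = %f\n", d );   /* prints 3.0 */
    return 0;
}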



CvSize

Size of a rectangle, with pixel precision.

typedef struct CvSize { int width; int height; } CvSize;
inline CvSize cvSize( int width, int height );

Note: the cv prefix of the constructor is lowercase!
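For example (a minimal sketch), cvSize is the usual way to pass image dimensions to the allocation functions; note that cvCreateMat takes rows and columns, i.e. height before width:

#include "cv.h"

int main( void )
{
    CvSize sz = cvSize( 640, 480 );                          /* width, height in pixels */

    IplImage* img = cvCreateImage( sz, IPL_DEPTH_8U, 3 );    /* 8-bit, 3-channel image */
    CvMat* m = cvCreateMat( sz.height, sz.width, CV_8UC1 );  /* rows = height, cols = width */

    cvReleaseImage( &img );
    cvReleaseMat( &m );
    return 0;
}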

CvSize2D32f
Size of a rectangle, with sub-pixel precision.

typedef struct CvSize2D32f { float width; float height; } CvSize2D32f;
inline CvSize2D32f cvSize2D32f( double width, double height ) { CvSize2D32f s; s.width = (float)width; s.height = (float)height; return s; }
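CvSize2D32f is rarely used on its own; it appears mainly as the size field of CvBox2D (the rotated rectangle returned by functions such as cvMinAreaRect2). A hedged sketch:

#include "cv.h"
#include <stdio.h>

int main( void )
{
    CvBox2D box;
    box.center = cvPoint2D32f( 50.5, 40.25 );
    box.size   = cvSize2D32f( 10.5, 4.0 );
    box.angle  = 30.0f;

    printf( "box area = %f\n", box.size.width * box.size.height );
    return 0;
}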

CvRect

Offset and size of a rectangle.

typedef struct CvRect { int x; int y; int width; int height; } CvRect;
inline CvRect cvRect( int x, int y, int width, int height ) { CvRect os; os.x = x; os.y = y; os.width = width; os.height = height; return os; }
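The most common use of CvRect is setting a region of interest (ROI) on an image; a minimal sketch (not from the original text):

#include "cv.h"

int main( void )
{
    IplImage* img = cvCreateImage( cvSize(320, 240), IPL_DEPTH_8U, 1 );
    cvZero( img );

    /* restrict processing to a 100x80 window whose top-left corner is (10, 20) */
    CvRect roi = cvRect( 10, 20, 100, 80 );
    cvSetImageROI( img, roi );
    cvSet( img, cvScalarAll(255), NULL );   /* only the ROI pixels are filled */
    cvResetImageROI( img );

    cvReleaseImage( &img );
    return 0;
}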

CvScalar

A container that can hold bundled data as a 1-, 2-, 3-, or 4-tuple of doubles.

typedef struct CvScalar { double val[4]; } CvScalar;
inline CvScalar cvScalar( double val0, double val1, double val2, double val3 ) { CvScalar arr; arr.val[0] = val0; arr.val[1] = val1; arr.val[2] = val2; arr.val[3] = val3; return arr; }
inline CvScalar cvScalarAll( double val0123 ) { CvScalar arr; arr.val[0] = val0123; arr.val[1] = val0123; arr.val[2] = val0123; arr.val[3] = val0123; return arr; }
inline CvScalar cvRealScalar( double val0 ) { CvScalar arr; arr.val[0] = val0; arr.val[1] = 0; arr.val[2] = 0; arr.val[3] = 0; return arr; }
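A short illustrative sketch of where CvScalar shows up in practice: whole-array fills and per-pixel access, where up to 4 channel values travel together:

#include "cv.h"
#include <stdio.h>

int main( void )
{
    IplImage* img = cvCreateImage( cvSize(100, 100), IPL_DEPTH_8U, 3 );

    /* fill the whole image with blue; the channel order of 8-bit color images is B,G,R */
    cvSet( img, cvScalar(255, 0, 0, 0), NULL );

    /* read one pixel back: cvGet2D packs its channels into a CvScalar */
    CvScalar px = cvGet2D( img, 10, 10 );   /* row, column */
    printf( "B=%f G=%f R=%f\n", px.val[0], px.val[1], px.val[2] );

    cvReleaseImage( &img );
    return 0;
}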

http://doc.blueruby.mydns.jp/opencv/classes/OpenCV/CvScalar.html
CvTermCriteria
Termination criteria for iterative algorithms.

#define CV_TERMCRIT_ITER    1
#define CV_TERMCRIT_NUMBER  CV_TERMCRIT_ITER
#define CV_TERMCRIT_EPS     2

typedef struct CvTermCriteria { int type; int max_iter; double epsilon; } CvTermCriteria;
inline CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon );
CvTermCriteria cvCheckTermCriteria( CvTermCriteria criteria, double default_eps, int default_max_iters ); 
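A hedged sketch of building a criterion that combines both conditions; such a value is then passed to iterative routines like cvFindCornerSubPix or cvCamShift:

#include "cv.h"
#include <stdio.h>

int main( void )
{
    /* stop after at most 30 iterations OR once the change drops below 0.01,
       whichever happens first */
    CvTermCriteria crit = cvTermCriteria( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 30, 0.01 );

    /* cvCheckTermCriteria substitutes defaults for whichever part was not requested */
    crit = cvCheckTermCriteria( crit, 1e-6, 100 );
    printf( "type=%d max_iter=%d epsilon=%f\n", crit.type, crit.max_iter, crit.epsilon );
    return 0;
}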

CvMat 
Multi-channel matrix.
typedef struct CvMat
{
    int type;
    int step;
    int* refcount;
    union
    {
        uchar* ptr;
        short* s;
        int* i;
        float* fl;
        double* db;
    } data;
#ifdef __cplusplus
    union { int rows; int height; };
    union { int cols; int width; };
#else
    int rows;
    int cols;
#endif
} CvMat;
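A minimal usage sketch (not from the original text): creating a single-channel float matrix and accessing elements with cvmGet/cvmSet:

#include "cv.h"
#include <stdio.h>

int main( void )
{
    CvMat* m = cvCreateMat( 3, 3, CV_32FC1 );   /* 3x3, single-channel float */
    cvSetIdentity( m, cvRealScalar(1) );

    cvmSet( m, 0, 1, 2.5 );                     /* m(0,1) = 2.5 */
    printf( "m(0,1) = %f\n", cvmGet( m, 0, 1 ) );
    printf( "rows=%d cols=%d step=%d\n", m->rows, m->cols, m->step );

    cvReleaseMat( &m );
    return 0;
}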
CvMatND 
Multi-dimensional, multi-channel dense array.
typedef struct CvMatND
{
    int type;
    int dims;
    int* refcount;
    union
    {
        uchar* ptr;
        short* s;
        int* i;
        float* fl;
        double* db;
    } data;
    struct
    {
        int size;
        int step;
    } dim[CV_MAX_DIM];
} CvMatND;
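A small sketch of a 3-dimensional dense array; cvSetRealND/cvGetRealND address an element through an index vector:

#include "cv.h"
#include <stdio.h>

int main( void )
{
    int sizes[3] = { 4, 5, 6 };                       /* a 4x5x6 dense array */
    CvMatND* a = cvCreateMatND( 3, sizes, CV_32FC1 );
    cvZero( a );

    int idx[3] = { 1, 2, 3 };
    cvSetRealND( a, idx, 7.0 );                       /* a[1][2][3] = 7 */
    printf( "a[1][2][3] = %f\n", cvGetRealND( a, idx ) );

    cvReleaseMatND( &a );
    return 0;
}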

CvSparseMat 
Multi-dimensional, multi-channel sparse array.
typedef struct CvSparseMat
{
    int type;
    int dims;
    int* refcount;
    struct CvSet* heap;
    void** hashtable;
    int hashsize;
    int total;
    int valoffset;
    int idxoffset;
    int size[CV_MAX_DIM];
} CvSparseMat;
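A sparse array only stores the elements that were actually written, so very large logical sizes stay cheap; a minimal sketch:

#include "cv.h"
#include <stdio.h>

int main( void )
{
    int sizes[2] = { 1000, 1000 };                 /* logically 1000x1000 ... */
    CvSparseMat* s = cvCreateSparseMat( 2, sizes, CV_32FC1 );

    int idx[2] = { 3, 700 };
    cvSetRealND( s, idx, 42.0 );                   /* ... but only touched nodes are stored */
    printf( "s(3,700) = %f, stored nodes = %d\n", cvGetRealND( s, idx ), s->total );

    cvReleaseSparseMat( &s );
    return 0;
}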
IplImage 
IPL image header.
typedef struct _IplImage
{
    int  nSize;
    int  ID;
    int  nChannels;
    int  alphaChannel;
    int  depth;
    char colorModel[4];
    char channelSeq[4];
    int  dataOrder;
    int  origin;
    int  align;
    int  width;
    int  height;
    struct _IplROI   *roi;
    struct _IplImage *maskROI;
    void *imageId;
    struct _IplTileInfo *tileInfo;
    int  imageSize;
    char *imageData;
    int  widthStep;
    int  BorderMode[4];
    int  BorderConst[4];
    char *imageDataOrigin;
} IplImage;

The IplImage structure comes from the Intel Image Processing Library, where it is defined natively. OpenCV supports only a subset of it:


alphaChannel is ignored by OpenCV.
colorModel and channelSeq are ignored by OpenCV. The single OpenCV color-conversion function, cvCvtColor, takes the source and destination color spaces as a parameter.
dataOrder must be IPL_DATA_ORDER_PIXEL (the color channels are interleaved); however, selected channels of planar images can be processed as if the COI (channel of interest) were set.
align is ignored by OpenCV; widthStep is used to access subsequent image rows instead.
maskROI is not supported. Functions that work with a mask take it as a separate parameter. The mask in OpenCV is 8-bit, whereas in IPL it is 1-bit.
tileInfo is not supported.
BorderMode and BorderConst are not supported. Every OpenCV function that works with a pixel's neighborhood usually uses a single, hard-coded border mode.

Besides the above restrictions, OpenCV handles ROIs differently. It requires that the sizes of the source and destination images, or of their ROIs, match exactly (depending on the operation; for example, for cvPyrDown the destination width (height) must equal the source width (height) divided by 2, ±1), whereas IPL processes the intersection area, so the image sizes or ROI sizes may be entirely independent.
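Since align is ignored and widthStep is what actually separates two rows (rows may be padded), raw pixel access should always step by widthStep bytes. A minimal sketch, not from the original text:

#include "cv.h"

int main( void )
{
    IplImage* img = cvCreateImage( cvSize(317, 200), IPL_DEPTH_8U, 1 );

    int x, y;
    for( y = 0; y < img->height; y++ )
    {
        /* widthStep is in bytes and already includes any row padding */
        uchar* row = (uchar*)(img->imageData + y * img->widthStep);
        for( x = 0; x < img->width; x++ )
            row[x] = (uchar)(x ^ y);
    }

    cvReleaseImage( &img );
    return 0;
}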

CvArr
Arbitrary array.

typedef void CvArr;

CvArr* is used only as a function parameter, to indicate that the function accepts arrays of more than one type, e.g. IplImage*, CvMat*, or even CvSeq*. The actual array type is determined at run time by analyzing the first 4 bytes of the array header.
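For instance (an illustrative sketch), cvGetSize accepts a CvArr* and therefore works on both image headers and matrices:

#include "cv.h"
#include <stdio.h>

/* works for any dense array type; the real type is resolved at run time */
static void print_size( const CvArr* arr )
{
    CvSize sz = cvGetSize( arr );
    printf( "%d x %d\n", sz.width, sz.height );
}

int main( void )
{
    IplImage* img = cvCreateImage( cvSize(320, 240), IPL_DEPTH_8U, 3 );
    CvMat*    mat = cvCreateMat( 100, 200, CV_32FC1 );

    print_size( img );   /* 320 x 240 */
    print_size( mat );   /* 200 x 100 */

    cvReleaseImage( &img );
    cvReleaseMat( &mat );
    return 0;
}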


ASIFT + OpenCV image feature matching: VC project source code

OpenCV headers included:

#include "cv.h"
#include "highgui.h"
#include "cxcore.h"

The core code is as follows:

if (!m_pImage1 || !m_pImage2)
{
    AfxMessageBox("please,select 2 images!");
    return;
}
UpdateData(TRUE);

CvSize sz1 = cvSize(m_pImage1->width, m_pImage1->height);
CvSize sz2 = cvSize(m_pImage2->width, m_pImage2->height);
CvScalar s;
IplImage *gimg1 = cvCreateImage(sz1, IPL_DEPTH_8U, 1);
cvCvtColor(m_pImage1, gimg1, CV_BGR2GRAY);
IplImage *gimg2 = cvCreateImage(sz2, IPL_DEPTH_8U, 1);
cvCvtColor(m_pImage2, gimg2, CV_BGR2GRAY);

size_t w1, h1;
w1 = gimg1->width;
h1 = gimg1->height;
float * iarr1 = new float[w1*h1];
for(int i=0;i<h1;i++)
{
    for(int j=0;j<w1;j++)
    {
        s = cvGet2D(gimg1,i,j);
        iarr1[i*w1+j] = s.val[0];
    }
}
vector<float> ipixels1(iarr1, iarr1 + w1 * h1);
delete [] iarr1;

size_t w2, h2;
w2 = gimg2->width;
h2 = gimg2->height;
float * iarr2 = new float[w2*h2];
for(int i=0;i<h2;i++)
{
    for(int j=0;j<w2;j++)
    {
        s = cvGet2D(gimg2,i,j);
        iarr2[i*w2+j] = s.val[0];
    }
}
vector<float> ipixels2(iarr2, iarr2 + w2 * h2);
delete [] iarr2;

float wS = IM_X;
float hS = IM_Y;
float zoom1=0, zoom2=0;
int wS1=0, hS1=0, wS2=0, hS2=0;
vector<float> ipixels1_zoom, ipixels2_zoom;

if (!m_bOrininal)
{
    if (m_lWidth==0 || m_lHeight == 0) return;
    wS = m_lWidth;
    hS = m_lHeight;

    float InitSigma_aa = 1.6;
    float fproj_p, fproj_bg;
    char fproj_i;
    float *fproj_x4, *fproj_y4;
    int fproj_o;
    fproj_o = 3;
    fproj_p = 0;
    fproj_i = 0;
    fproj_bg = 0;
    fproj_x4 = 0;
    fproj_y4 = 0;

    float areaS = wS * hS;

    // Resize image 1
    float area1 = w1 * h1;
    zoom1 = sqrt(area1/areaS);
    wS1 = (int) (w1 / zoom1);
    hS1 = (int) (h1 / zoom1);
    int fproj_sx = wS1;
    int fproj_sy = hS1;
    float fproj_x1 = 0;
    float fproj_y1 = 0;
    float fproj_x2 = wS1;
    float fproj_y2 = 0;
    float fproj_x3 = 0;
    float fproj_y3 = hS1;

    /* Anti-aliasing filtering along vertical direction */
    if ( zoom1 > 1 )
    {
        float sigma_aa = InitSigma_aa * zoom1 / 2;
        GaussianBlur1D(ipixels1,w1,h1,sigma_aa,1);
        GaussianBlur1D(ipixels1,w1,h1,sigma_aa,0);
    }
    // simulate a tilt: subsample the image along the vertical axis by a factor of t.
    ipixels1_zoom.resize(wS1*hS1);
    fproj (ipixels1, ipixels1_zoom, w1, h1, &fproj_sx, &fproj_sy, &fproj_bg, &fproj_o, &fproj_p,
           &fproj_i, fproj_x1, fproj_y1, fproj_x2, fproj_y2, fproj_x3, fproj_y3, fproj_x4, fproj_y4);

    // Resize image 2
    float area2 = w2 * h2;
    zoom2 = sqrt(area2/areaS);
    wS2 = (int) (w2 / zoom2);
    hS2 = (int) (h2 / zoom2);
    fproj_sx = wS2;
    fproj_sy = hS2;
    fproj_x2 = wS2;
    fproj_y3 = hS2;

    /* Anti-aliasing filtering along vertical direction */
    if ( zoom1 > 1 )
    {
        float sigma_aa = InitSigma_aa * zoom2 / 2;
        GaussianBlur1D(ipixels2,w2,h2,sigma_aa,1);
        GaussianBlur1D(ipixels2,w2,h2,sigma_aa,0);
    }
    // simulate a tilt: subsample the image along the vertical axis by a factor of t.
    ipixels2_zoom.resize(wS2*hS2);
    fproj (ipixels2, ipixels2_zoom, w2, h2, &fproj_sx, &fproj_sy, &fproj_bg, &fproj_o, &fproj_p,
           &fproj_i, fproj_x1, fproj_y1, fproj_x2, fproj_y2, fproj_x3, fproj_y3, fproj_x4, fproj_y4);
}
else
{
    ipixels1_zoom.resize(w1*h1);
    ipixels1_zoom = ipixels1;
    wS1 = w1;
    hS1 = h1;
    zoom1 = 1;

    ipixels2_zoom.resize(w2*h2);
    ipixels2_zoom = ipixels2;
    wS2 = w2;
    hS2 = h2;
    zoom2 = 1;
}

int num_of_tilts1 = m_lTilts1;
int num_of_tilts2 = m_lTilts2;
int verb = 0;

// Define the SIFT parameters
siftPar siftparameters;
default_sift_parameters(siftparameters);

vector< vector< keypointslist > > keys1;
vector< vector< keypointslist > > keys2;
int num_keys1=0, num_keys2=0;

SetWindowText("Computing keypoints on the two images...");
CString str1,str2;
time_t tstart, tend1,tend2;
tstart = time(0);
DWORD dstart = GetTickCount();

num_keys1 = compute_asift_keypoints(ipixels1_zoom, wS1, hS1, num_of_tilts1, verb, keys1, siftparameters);
tend1 = time(0);
m_lKeyNum1 = num_keys1;
UpdateData(FALSE);
str1.Format("Img1 Keypoints computation accomplished in %f s",difftime(tend1, tstart));
SetWindowText(str1);

num_keys2 = compute_asift_keypoints(ipixels2_zoom, wS2, hS2, num_of_tilts2, verb, keys2, siftparameters);
tend2 = time(0);
m_lKeyNum2 = num_keys2;
UpdateData(FALSE);
str2.Format("Img2 Keypoints computation accomplished in %f s ,Matching the keypoints...",difftime(tend2, tstart));
SetWindowText(str2);

//// Match ASIFT keypoints
int num_matchings;
matchingslist matchings;
tstart = time(0);
num_matchings = compute_asift_matches(num_of_tilts1, num_of_tilts2, wS1, hS1, wS2, hS2, verb,
                                      keys1, keys2, matchings, siftparameters);
tend1 = time(0);
DWORD dSpan = GetTickCount() - dstart;
cout << "Keypoints matching accomplished in " << difftime(tend1, tstart) << " seconds." << endl;
str2.Format("Keypoints matching accomplished in %f s",difftime(tend1, tstart));
SetWindowText(str2);
m_lMatches = num_matchings;
UpdateData(FALSE);
str1.Format("Total time used:%d ms",dSpan);
AfxMessageBox(str1);

cvRelease((void**)&gimg1);
cvRelease((void**)&gimg2);

Reference: http://www.ipol.im/pub/art/2011/my-asift/