cvHaarDetectObjects

Classifier structures and the functions that operate on them:
CvHaarFeature
#define CV_HAAR_FEATURE_MAX  3
typedef struct CvHaarFeature
{
    int  tilted;
    struct
    {
        CvRect r;
        float weight;
    } rect[CV_HAAR_FEATURE_MAX];
}
CvHaarFeature;


A Haar feature is made up of 2-3 rectangles, each with an associated weight.
tilted: /* 0 means an upright feature, 1 means a 45-degree rotated feature */
rect[CV_HAAR_FEATURE_MAX]: /* 2-3 rectangles with weights of opposite signs and with absolute values inversely proportional to the areas of the rectangles. If rect[2].weight != 0, the feature consists of 3 rectangles, otherwise of 2. */
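As a concrete illustration, here is a minimal sketch (not OpenCV's internal code) of how such a feature could be evaluated for an upright detection window at offset (x, y), assuming a 32-bit integral image produced by cvIntegral; the helper names are invented for this example:

#include "cv.h"

/* sum of pixels inside rectangle r of the detection window at (x, y),
   computed from the integral image `sum` (CV_32SC1, (rows+1) x (cols+1)) */
static int rect_sum( const CvMat* sum, int x, int y, CvRect r )
{
    const int* s = sum->data.i;
    int step = (int)(sum->step / sizeof(int));
    return s[(y + r.y)            * step + (x + r.x)]
         - s[(y + r.y)            * step + (x + r.x + r.width)]
         - s[(y + r.y + r.height) * step + (x + r.x)]
         + s[(y + r.y + r.height) * step + (x + r.x + r.width)];
}

/* weighted sum of the 2-3 rectangle sums: the feature response */
static float haar_feature_response( const CvHaarFeature* f, const CvMat* sum, int x, int y )
{
    float response = f->rect[0].weight * rect_sum( sum, x, y, f->rect[0].r )
                   + f->rect[1].weight * rect_sum( sum, x, y, f->rect[1].r );
    if( f->rect[2].weight != 0 )                 /* the third rectangle is optional */
        response += f->rect[2].weight * rect_sum( sum, x, y, f->rect[2].r );
    return response;
}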
CvHaarClassifier
typedef struct CvHaarClassifier
{
    int count;
    CvHaarFeature* haar_feature;
    float* threshold;
    int* left;
    int* right;
    float* alpha;
}
CvHaarClassifier;


/* a single tree classifier (a stump in the simplest case) that computes the feature response at a particular image location (i.e. the pixel sums over sub-rectangles of the window) and outputs a value depending on that response */
int count;  /* number of nodes in the decision tree */
/* these are "parallel" arrays. Every index i corresponds to a node of the decision tree (the root has index 0).
left[i] - index of the left child (or negated index if the left child is a leaf)
right[i] - index of the right child (or negated index if the right child is a leaf)
threshold[i] - branch threshold. If the feature response is <= threshold, the left branch is chosen, otherwise the right branch is chosen.
alpha[i] - output value corresponding to the leaf. */
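A sketch of how such a tree could be walked, reusing the hypothetical haar_feature_response() helper from the sketch above (the real OpenCV code additionally normalizes the threshold by the window's standard deviation, which is omitted here):

/* evaluate one CvHaarClassifier (tree or stump) and return the leaf value alpha */
static float eval_weak_classifier( const CvHaarClassifier* c, const CvMat* sum, int x, int y )
{
    int node = 0;                                /* the root has index 0 */
    for(;;)
    {
        float response = haar_feature_response( &c->haar_feature[node], sum, x, y );
        int next = response <= c->threshold[node] ? c->left[node] : c->right[node];
        if( next <= 0 )                          /* a non-positive index marks a leaf */
            return c->alpha[-next];
        node = next;
    }
}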
CvHaarStageClassifier
typedef struct CvHaarStageClassifier
{
    int  count;                   /* number of classifiers in the battery */
    float threshold;              /* threshold for the boosted classifier */
    CvHaarClassifier* classifier; /* array of classifiers */

    /* these fields are used for organizing trees of stage classifiers,
       rather than just straight cascades */
    int next;
    int child;
    int parent;
}
CvHaarStageClassifier;


/* a boosted battery of classifiers (= stage classifier): the stage classifier returns 1 if the sum of the classifiers' responses is greater than the threshold and 0 otherwise */
int  count;  /* number of classifiers in the battery */
float threshold; /* threshold for the boosted classifier */
CvHaarClassifier* classifier; /* array of classifiers */
/* these fields are used for organizing trees of stage classifiers, rather than just straight cascades */
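Building on the previous sketch, a stage could then be evaluated roughly as follows (again illustrative, not the library's actual implementation):

/* a stage passes only if the sum of its weak classifiers' responses exceeds the stage threshold */
static int eval_stage( const CvHaarStageClassifier* stage, const CvMat* sum, int x, int y )
{
    float s = 0.f;
    int i;
    for( i = 0; i < stage->count; i++ )
        s += eval_weak_classifier( &stage->classifier[i], sum, x, y );
    return s > stage->threshold;                 /* 1 = pass this stage, 0 = reject the window */
}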

CvHaarClassifierCascade

typedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade;
typedef struct CvHaarClassifierCascade
{
    int  flags;
    int  count;
    CvSize orig_window_size;
    CvSize real_window_size;
    double scale;
    CvHaarStageClassifier* stage_classifier;
    CvHidHaarClassifierCascade* hid_cascade;
}
CvHaarClassifierCascade;


/* cascade or tree of stage classifiers */
int  flags; /* signature */
int  count; /* number of stages */
CvSize orig_window_size; /* original object size (the cascade is trained for) */
/* these two parameters are set by cvSetImagesForHaarClassifierCascade */
CvSize real_window_size; /* current object size */
double scale; /* current scale */
CvHaarStageClassifier* stage_classifier; /* array of stage classifiers */
CvHidHaarClassifierCascade* hid_cascade; /* hidden optimized representation of the cascade, created by cvSetImagesForHaarClassifierCascade */
Together these structures describe a cascaded boosted Haar classifier. The cascade has the following hierarchical structure:
   Cascade:
       Stage1:
           Classifier11:
               Feature11
           Classifier12:
               Feature12
           ...
       Stage2:
           Classifier21:
               Feature21
           ...
       ...
The whole hierarchy can be constructed manually or loaded from a file or an embedded classifier base with cvLoadHaarClassifierCascade.
Functions used for detection:
cvLoadHaarClassifierCascade
Loads a trained cascade classifier from a file or from the classifier database embedded in OpenCV.
CvHaarClassifierCascade* cvLoadHaarClassifierCascade(
                        const char* directory,
                        CvSize orig_window_size );
directory: path to the directory containing the trained cascade classifier.
orig_window_size: size of the objects the cascade was trained for. This information is not stored in the cascade itself, so it has to be specified separately.
The function cvLoadHaarClassifierCascade loads a trained cascade of Haar classifiers from a file or from the classifier database embedded in OpenCV. The cascade can be trained with the haartraining application (see opencv/apps/haartraining for details).
This function is obsolete. Modern object detection classifiers are usually stored in XML or YAML files rather than loaded from a directory. To load a classifier from a file, use cvLoad instead.
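For example, loading a cascade stored as an XML file could look like this (a sketch; the file name in the usage comment is only an example):

#include <stdio.h>
#include "cv.h"

static CvHaarClassifierCascade* load_cascade( const char* path )
{
    /* cvLoad reads the XML/YAML file and reconstructs the cascade */
    CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)cvLoad( path, 0, 0, 0 );
    if( !cascade )
        fprintf( stderr, "Could not load classifier cascade: %s\n", path );
    return cascade;
}

/* usage: CvHaarClassifierCascade* c = load_cascade( "haarcascade_frontalface_alt.xml" ); */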
cvReleaseHaarClassifierCascade
Releases a Haar classifier cascade.
void cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade );
cascade: double pointer to the cascade to be released. The pointer is cleared by the function.
The function cvReleaseHaarClassifierCascade frees the memory occupied by the cascade, whether it was created manually or loaded with cvLoadHaarClassifierCascade or cvLoad.
cvHaarDetectObjects
Detects objects in an image.
typedef struct CvAvgComp
{
    CvRect rect;   /* bounding rectangle for the object (average rectangle of a group) */
    int neighbors; /* number of neighbor rectangles in the group */
}
CvAvgComp;


CvSeq* cvHaarDetectObjects( const CvArr* image,
                            CvHaarClassifierCascade* cascade,
                            CvMemStorage* storage,
                            double scale_factor=1.1,
                            int min_neighbors=3, int flags=0,
                            CvSize min_size=cvSize(0,0) );

image: the image in which to detect objects.
cascade: the Haar classifier cascade (its internal representation).
storage: memory storage used to hold the resulting sequence of candidate object rectangles.
scale_factor: factor by which the search window is scaled between successive scans; for example, 1.1 means enlarging the window by 10% each time.
min_neighbors: minimum number (minus 1) of neighboring rectangles that makes up an object. Groups with fewer than min_neighbors-1 rectangles are rejected. If min_neighbors is 0, the function does no grouping at all and returns every detected candidate rectangle, which is useful when the user wants to apply a customized grouping procedure.
flags: mode of operation. Currently the only flag that can be specified is CV_HAAR_DO_CANNY_PRUNING. If it is set, the function uses a Canny edge detector to reject image regions that contain too few or too many edges and are therefore unlikely to contain the target object. The thresholds are tuned for face detection, where this pruning speeds up processing.
min_size: minimum detection window size. By default it equals the sample size used during training (about 20x20 for face detection).
The function cvHaarDetectObjects finds rectangular regions in the image that are likely to contain objects the cascade has been trained for, and returns those regions as a sequence of rectangles. The function scans the image several times with search windows of different sizes (see cvSetImagesForHaarClassifierCascade). Each scan applies the classifiers to overlapping regions of the image with cvRunHaarClassifierCascade. It may also use heuristics to reduce the number of analyzed regions, such as Canny pruning. After processing and collecting the candidate rectangles (the regions that passed all stages of the cascade), it groups them and returns the average rectangle of each sufficiently large group. The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned for accurate but slower detection. For faster real-time detection on video, the typical settings are scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, min_size=<minimum possible face size> (for example, for video-conferencing footage).
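A minimal sketch of a call with the faster, video-oriented settings mentioned above (the 30x30 minimum size and the function name are assumptions for illustration; `gray` must be a single-channel 8-bit image):

#include <stdio.h>
#include "cv.h"

static void detect_fast( IplImage* gray, CvHaarClassifierCascade* cascade )
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* objects = cvHaarDetectObjects( gray, cascade, storage,
                                          1.2, 2, CV_HAAR_DO_CANNY_PRUNING,
                                          cvSize(30, 30) );    /* example minimum object size */
    int i;
    for( i = 0; i < (objects ? objects->total : 0); i++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( objects, i );
        printf( "object %d: x=%d y=%d w=%d h=%d\n", i, r->x, r->y, r->width, r->height );
    }
    cvReleaseMemStorage( &storage );
}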
cvSetImagesForHaarClassifierCascade
Assigns images to the hidden cascade.
void cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade,
                                         const CvArr* sum, const CvArr* sqsum,
                                         const CvArr* tilted_sum, double scale );
cascade: the hidden Haar classifier cascade, created by cvCreateHidHaarClassifierCascade.
sum: integral (sum) image, single channel, 32-bit integer format. This image and the two that follow are used for fast feature evaluation and brightness/contrast normalization. All three can be obtained from an 8-bit or floating-point single-channel input image with cvIntegral.
sqsum: squared-sum image, single channel, 64-bit floating-point format.
tilted_sum: tilted sum image, single channel, 32-bit integer format.
scale: window scale for the cascade. If scale=1, the original window size is used and objects of exactly that size are searched for; the original window size is defined when the cascade is loaded with cvLoadHaarClassifierCascade (24x24 by default for "<default_face_cascade>"). If scale=2, a window twice as large is used (48x48 by default for the face cascade). This speeds up the search roughly four times, but faces smaller than 48x48 can no longer be detected.
The function cvSetImagesForHaarClassifierCascade assigns images and/or a window scale to the hidden classifier cascade. If the image pointers are NULL, the previously set images keep being used (i.e. NULLs mean "do not change the images"). The scale parameter has no such "protected" value, but the previous scale can be retrieved with cvGetHaarClassifierCascadeScale and passed again. The function prepares the cascade for detecting objects of a particular size in a particular image. It is called internally by cvHaarDetectObjects, but it can also be called by the user when the lower-level function cvRunHaarClassifierCascade is used directly.
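The setup that cvHaarDetectObjects performs internally can be reproduced by hand roughly as follows (a sketch; `img` is assumed to be an 8-bit single-channel image):

#include "cv.h"

static void prepare_cascade( const CvMat* img, CvHaarClassifierCascade* cascade )
{
    CvMat* sum    = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );
    CvMat* sqsum  = cvCreateMat( img->rows + 1, img->cols + 1, CV_64FC1 );
    CvMat* tilted = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );

    cvIntegral( img, sum, sqsum, tilted );       /* compute all three integral images */

    /* scale = 1.0: search for objects of the original training size */
    cvSetImagesForHaarClassifierCascade( cascade, sum, sqsum, tilted, 1.0 );

    /* note: sum/sqsum/tilted must stay alive for as long as the cascade is used */
}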
cvRunHaarClassifierCascade
Runs a cascade of boosted classifiers at a given image location.
int cvRunHaarClassifierCascade( CvHaarClassifierCascade* cascade,
                               CvPoint pt, int start_stage=0 );
cascade: the Haar classifier cascade.
pt: top-left corner of the analyzed region. Its size equals the original window size scaled by the currently set scale; the current window size can be retrieved with cvGetHaarClassifierCascadeWindowSize.
start_stage: zero-based index of the cascade stage to start from. The function assumes that all preceding stages have already been passed. This feature is used internally by cvHaarDetectObjects to make better use of the processor cache.
The function cvRunHaarClassifierCascade checks a single image location. Before calling it, the integral images and the appropriate scale (window size) must be set with cvSetImagesForHaarClassifierCascade. It returns a positive value if the analyzed rectangle passes every stage of the classifier cascade (i.e. it is a candidate object), and zero or a negative value otherwise.
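Combined with the previous sketch, a hand-rolled single-scale scan could look like this (illustrative only; cvHaarDetectObjects does this, plus multi-scale scanning and grouping of the results, internally):

#include <stdio.h>
#include "cv.h"

/* assumes prepare_cascade() from the previous sketch has been called with scale 1.0,
   so the window size equals cascade->orig_window_size */
static void scan_image( const CvMat* img, CvHaarClassifierCascade* cascade )
{
    CvSize win = cascade->orig_window_size;
    int x, y;
    for( y = 0; y + win.height <= img->rows; y += 2 )
        for( x = 0; x + win.width <= img->cols; x += 2 )
            if( cvRunHaarClassifierCascade( cascade, cvPoint(x, y), 0 ) > 0 )
                printf( "candidate window at (%d, %d)\n", x, y );
}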






Example: using a cascade classifier trained for a particular object to find the image regions that contain it and return them as a sequence of rectangles.

code:

#include "cv.h"
#include "highgui.h"

/* load a trained classifier cascade from a file */
CvHaarClassifierCascade* load_object_detector( const char* cascade_path )
{
    return (CvHaarClassifierCascade*)cvLoad( cascade_path );
}

void detect_and_draw_objects( IplImage* image,
                              CvHaarClassifierCascade* cascade,
                              int do_pyramids )
{
    IplImage* small_image = image;
    CvMemStorage* storage = cvCreateMemStorage(0); /* create dynamic memory storage */
    CvSeq* faces;
    int i, scale = 1;

    /* if the flag is specified, down-scale the input image to get a
       performance boost without losing quality (perhaps) */
    if( do_pyramids )
    {
        small_image = cvCreateImage( cvSize(image->width/2,image->height/2), IPL_DEPTH_8U, 3 );
        cvPyrDown( image, small_image, CV_GAUSSIAN_5x5 ); /* cvPyrDown downsamples the image using
            Gaussian pyramid decomposition: it convolves the input image with the specified filter
            and then rejects even rows and columns */
        scale = 2;
    }

    /* use the fastest variant */
    faces = cvHaarDetectObjects( small_image, cascade, storage, 1.2, 2, CV_HAAR_DO_CANNY_PRUNING );

    /* draw all the rectangles */
    for( i = 0; i < faces->total; i++ )
    {
        /* extract the rectangles only */
        CvRect face_rect = *(CvRect*)cvGetSeqElem( faces, i );
        cvRectangle( image, cvPoint(face_rect.x*scale,face_rect.y*scale),
                     cvPoint((face_rect.x+face_rect.width)*scale,
                             (face_rect.y+face_rect.height)*scale),
                     CV_RGB(255,0,0), 3 );
    }

    if( small_image != image )
        cvReleaseImage( &small_image );
    cvReleaseMemStorage( &storage );  /* release the dynamic memory storage */
}

/* takes the image filename and the cascade path from the command line */
int main( int argc, char** argv )
{
    IplImage* image;
    if( argc==3 && (image = cvLoadImage( argv[1], 1 )) != 0 )
    {
        CvHaarClassifierCascade* cascade = load_object_detector(argv[2]);
        detect_and_draw_objects( image, cascade, 1 );
        cvNamedWindow( "test", 0 );
        cvShowImage( "test", image );
        cvWaitKey(0);
        cvReleaseHaarClassifierCascade( &cascade );
        cvReleaseImage( &image );
    }
    return 0;
}


========================================================================================================


The following is taken from: http://blog.sina.com.cn/s/blog_790bb7190100qm66.html

OpenCV face detection essentially performs pattern matching with a trained cascade (Haar classifier).

cvHaarDetectObjects first converts the image to grayscale, checks the passed-in flags to decide whether to apply Canny edge pruning (off by default), and then performs the matching. Afterwards it collects the matched rectangles, filters out noise, and counts the neighbors in each group: a group whose neighbor count exceeds the specified value (the min_neighbors argument) is kept as an output result, otherwise it is discarded.

Matching loop: the classifier window is enlarged by scale_factor (the passed-in value) at each iteration, which is equivalent to shrinking the original image by that factor, and matching is repeated until the classifier window becomes larger than the image, at which point the results are returned. The matching at each position is done by cvRunHaarClassifierCascade; all results are stored in a CvSeq* seq (a dynamically growing sequence of elements) and handed back to cvHaarDetectObjects.

cvRunHaarClassifierCascade matches the cascade against the image at a given position, and chooses a different matching path depending on the type of cascade passed in (tree, stump — an incomplete tree — or other).

The function cvRunHaarClassifierCascade checks a single image location. Before calling it, the integral images and the appropriate scale (window size) must be set with cvSetImagesForHaarClassifierCascade. It returns a positive value when the analyzed rectangle passes every stage of the cascade (a candidate object), and zero or a negative value otherwise.

To understand in detail how OpenCV's face detection searches for matches, the source of cvHaarDetectObjects and cvRunHaarClassifierCascade was read through and annotated below, to make it easier to follow.

Annotated cvHaarDetectObjects source:

CV_IMPL CvSeq*
cvHaarDetectObjects( const CvArr* _img,
                     CvHaarClassifierCascade* cascade,
                     CvMemStorage* storage, double scale_factor,
                     int min_neighbors, int flags, CvSize min_size )
{
    int split_stage = 2;

    CvMat stub, *img = (CvMat*)_img;            // get a CvMat header for the input image
    CvMat *temp = 0, *sum = 0, *tilted = 0, *sqsum = 0, *norm_img = 0, *sumcanny = 0, *img_small = 0;
    CvSeq* seq = 0;
    CvSeq* seq2 = 0;                            // CvSeq: dynamically growing sequence of elements
    CvSeq* idx_seq = 0;
    CvSeq* result_seq = 0;
    CvMemStorage* temp_storage = 0;
    CvAvgComp* comps = 0;
    int i;

#ifdef _OPENMP
    CvSeq* seq_thread[CV_MAX_THREADS] = {0};
    int max_threads = 0;
#endif

    CV_FUNCNAME( "cvHaarDetectObjects" );

    __BEGIN__;

    double factor;
    int npass = 2, coi;                                         // npass = 2
    int do_canny_pruning = flags & CV_HAAR_DO_CANNY_PRUNING;    // nonzero => apply Canny edge pruning

    if( !CV_IS_HAAR_CLASSIFIER(cascade) )
        CV_ERROR( !cascade ? CV_StsNullPtr : CV_StsBadArg, "Invalid classifier cascade" );

    if( !storage )
        CV_ERROR( CV_StsNullPtr, "Null storage pointer" );

    CV_CALL( img = cvGetMat( img, &stub, &coi ));
    if( coi )
        CV_ERROR( CV_BadCOI, "COI is not supported" );          // argument checks

    if( CV_MAT_DEPTH(img->type) != CV_8U )
        CV_ERROR( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );

    CV_CALL( temp = cvCreateMat( img->rows, img->cols, CV_8UC1 ));
    CV_CALL( sum = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 ));
    CV_CALL( sqsum = cvCreateMat( img->rows + 1, img->cols + 1, CV_64FC1 ));
    CV_CALL( temp_storage = cvCreateChildMemStorage( storage ));

#ifdef _OPENMP
    max_threads = cvGetNumThreads();
    for( i = 0; i < max_threads; i++ )
    {
        CvMemStorage* temp_storage_thread;
        CV_CALL( temp_storage_thread = cvCreateMemStorage(0));         // CV_CALL runs the call and raises an error on failure
        CV_CALL( seq_thread[i] = cvCreateSeq( 0, sizeof(CvSeq),        // per-thread candidate sequences
                        sizeof(CvRect), temp_storage_thread ));
    }
#endif

    if( !cascade->hid_cascade )
        CV_CALL( icvCreateHidHaarClassifierCascade(cascade) );

    if( cascade->hid_cascade->has_tilted_features )
        tilted = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );      // tilted integral image, (rows+1) x (cols+1)

    seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvRect), temp_storage );     // candidate rectangles
    seq2 = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvAvgComp), temp_storage ); // grouped rectangles + neighbor counts
    result_seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvAvgComp), storage );// final results

    if( min_neighbors == 0 )
        seq = result_seq;

    if( CV_MAT_CN(img->type) > 1 )
    {
        cvCvtColor( img, temp, CV_BGR2GRAY );                  // convert img to grayscale
        img = temp;
    }

    if( flags & CV_HAAR_SCALE_IMAGE )                          // scale the image rather than the detector window
    {
        CvSize win_size0 = cascade->orig_window_size;          // original training window size
        int use_ipp = cascade->hid_cascade->ipp_stages != 0 &&
                    icvApplyHaarClassifier_32s32f_C1R_p != 0;  // IPP-accelerated path available?

        if( use_ipp )
            CV_CALL( norm_img = cvCreateMat( img->rows, img->cols, CV_32FC1 ));      // normalization image (32-bit float)
        CV_CALL( img_small = cvCreateMat( img->rows + 1, img->cols + 1, CV_8UC1 ));  // buffer for the downscaled image

        for( factor = 1; ; factor *= scale_factor )            // scan over scales, growing by scale_factor
        {
            int positive = 0;
            int x, y;
            CvSize win_size = { cvRound(win_size0.width*factor),
                                cvRound(win_size0.height*factor) };                  // detector window scaled by factor
            CvSize sz = { cvRound( img->cols/factor ), cvRound( img->rows/factor ) };// image size shrunk by factor
            CvSize sz1 = { sz.width - win_size0.width, sz.height - win_size0.height };// scan area: image minus window
            CvRect rect1 = { icv_object_win_border, icv_object_win_border,
                win_size0.width - icv_object_win_border*2,                           // icv_object_win_border has the value 1
                win_size0.height - icv_object_win_border*2 };
            CvMat img1, sum1, sqsum1, norm1, tilted1, mask1;
            CvMat* _tilted = 0;

            if( sz1.width <= 0 || sz1.height <= 0 )            // the window no longer fits into the image -> stop
                break;
            if( win_size.width < min_size.width || win_size.height < min_size.height )  // window still smaller than min_size -> next scale
                continue;

            // CV_8UC1 etc. are built with
            // #define CV_MAKETYPE(depth,cn) ((depth) + (((cn)-1) << CV_CN_SHIFT))
            // i.e. depth + (cn-1) shifted left: depth, depth+8, depth+16, depth+24
            img1 = cvMat( sz.height, sz.width, CV_8UC1, img_small->data.ptr );       // header for the downscaled image
            sum1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, sum->data.ptr );        // integral image
            sqsum1 = cvMat( sz.height+1, sz.width+1, CV_64FC1, sqsum->data.ptr );    // squared integral image
            if( tilted )
            {
                tilted1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, tilted->data.ptr );  // tilted integral image
                _tilted = &tilted1;
            }
            norm1 = cvMat( sz1.height, sz1.width, CV_32FC1, norm_img ? norm_img->data.ptr : 0 ); // normalization factors over the scan area
            mask1 = cvMat( sz1.height, sz1.width, CV_8UC1, temp->data.ptr );                     // per-position detection mask

            cvResize( img, &img1, CV_INTER_LINEAR );           // downscale the image (bilinear interpolation) into img1
            cvIntegral( &img1, &sum1, &sqsum1, _tilted );      // compute the integral images

            if( use_ipp && icvRectStdDev_32s32f_C1R_p( sum1.data.i, sum1.step,
                sqsum1.data.db, sqsum1.step, norm1.data.fl, norm1.step, sz1, rect1 ) < 0 )
                use_ipp = 0;

            if( use_ipp )                                      // IPP (Intel Performance Primitives) accelerated path
            {
                positive = mask1.cols*mask1.rows;              // initially every position is a candidate
                cvSet( &mask1, cvScalarAll(255) );             // fill the mask with 255
                for( i = 0; i < cascade->count; i++ )
                {
                    if( icvApplyHaarClassifier_32s32f_C1R_p(sum1.data.i, sum1.step,
                        norm1.data.fl, norm1.step, mask1.data.ptr, mask1.step,
                        sz1, &positive, cascade->hid_cascade->stage_classifier[i].threshold,
                        cascade->hid_cascade->ipp_stages[i]) < 0 )
                    {
                        use_ipp = 0;                           // IPP call failed -> fall back to the plain path
                        break;
                    }
                    if( positive <= 0 )
                        break;
                }
            }

            if( !use_ipp )                                     // plain (non-IPP) path
            {
                cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, 0, 1. );
                for( y = 0, positive = 0; y < sz1.height; y++ )
                    for( x = 0; x < sz1.width; x++ )
                    {
                        mask1.data.ptr[mask1.step*y + x] =
                            cvRunHaarClassifierCascade( cascade, cvPoint(x,y), 0 ) > 0;   // run the cascade at (x,y)
                        positive += mask1.data.ptr[mask1.step*y + x];
                    }
            }

            if( positive > 0 )
            {
                for( y = 0; y < sz1.height; y++ )
                    for( x = 0; x < sz1.width; x++ )
                        if( mask1.data.ptr[mask1.step*y + x] != 0 )
                        {
                            CvRect obj_rect = { cvRound(y*factor), cvRound(x*factor),
                                                win_size.width, win_size.height };
                            cvSeqPush( seq, &obj_rect );       // store the accepted window in seq
                        }
            }
        }
    }
    else                                                       // the detector window is scaled instead of the image
    {
        cvIntegral( img, sum, sqsum, tilted );

        if( do_canny_pruning )                                 // Canny edge pruning requested
        {
            sumcanny = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );
            cvCanny( img, temp, 0, 50, 3 );
            cvIntegral( temp, sumcanny );                      // integral of the Canny edge map
        }

        if( (unsigned)split_stage >= (unsigned)cascade->count ||
            cascade->hid_cascade->is_tree )
        {
            split_stage = cascade->count;
            npass = 1;
        }

        for( factor = 1; factor*cascade->orig_window_size.width < img->cols - 10 &&   // scan over scales
                         factor*cascade->orig_window_size.height < img->rows - 10;
             factor *= scale_factor )
        {
            const double ystep = MAX( 2, factor );
            CvSize win_size = { cvRound( cascade->orig_window_size.width * factor ),
                                cvRound( cascade->orig_window_size.height * factor )};
            CvRect equ_rect = { 0, 0, 0, 0 };
            int *p0 = 0, *p1 = 0, *p2 = 0, *p3 = 0;
            int *pq0 = 0, *pq1 = 0, *pq2 = 0, *pq3 = 0;
            int pass, stage_offset = 0;
            int stop_height = cvRound((img->rows - win_size.height) / ystep);

            if( win_size.width < min_size.width || win_size.height < min_size.height )  // window smaller than min_size -> next scale
                continue;

            cvSetImagesForHaarClassifierCascade( cascade, sum, sqsum, tilted, factor ); // set images and scale for this factor
            cvZero( temp );                                                             // clear the mask buffer

            if( do_canny_pruning )                             // precompute pointers for Canny pruning
            {
                equ_rect.x = cvRound(win_size.width*0.15);
                equ_rect.y = cvRound(win_size.height*0.15);
                equ_rect.width = cvRound(win_size.width*0.7);
                equ_rect.height = cvRound(win_size.height*0.7);

                p0 = (int*)(sumcanny->data.ptr + equ_rect.y*sumcanny->step) + equ_rect.x;
                p1 = (int*)(sumcanny->data.ptr + equ_rect.y*sumcanny->step)
                            + equ_rect.x + equ_rect.width;
                p2 = (int*)(sumcanny->data.ptr + (equ_rect.y + equ_rect.height)*sumcanny->step) + equ_rect.x;
                p3 = (int*)(sumcanny->data.ptr + (equ_rect.y + equ_rect.height)*sumcanny->step)
                            + equ_rect.x + equ_rect.width;

                pq0 = (int*)(sum->data.ptr + equ_rect.y*sum->step) + equ_rect.x;
                pq1 = (int*)(sum->data.ptr + equ_rect.y*sum->step)
                            + equ_rect.x + equ_rect.width;
                pq2 = (int*)(sum->data.ptr + (equ_rect.y + equ_rect.height)*sum->step) + equ_rect.x;
                pq3 = (int*)(sum->data.ptr + (equ_rect.y + equ_rect.height)*sum->step)
                            + equ_rect.x + equ_rect.width;
            }

            cascade->hid_cascade->count = split_stage;         // pass 0 runs only the first split_stage stages

            for( pass = 0; pass < npass; pass++ )
            {
#ifdef _OPENMP
    #pragma omp parallel for num_threads(max_threads), schedule(dynamic)
#endif
                for( int _iy = 0; _iy < stop_height; _iy++ )
                {
                    int iy = cvRound(_iy*ystep);
                    int _ix, _xstep = 1;
                    int stop_width = cvRound((img->cols - win_size.width) / ystep);
                    uchar* mask_row = temp->data.ptr + temp->step * iy;

                    for( _ix = 0; _ix < stop_width; _ix += _xstep )
                    {
                        int ix = cvRound(_ix*ystep); // it really should be ystep

                        if( pass == 0 )                        // first pass
                        {
                            int result;
                            _xstep = 2;

                            if( do_canny_pruning )             // reject windows with too few or too many edges
                            {
                                int offset;
                                int s, sq;

                                offset = iy*(sum->step/sizeof(p0[0])) + ix;
                                s = p0[offset] - p1[offset] - p2[offset] + p3[offset];
                                sq = pq0[offset] - pq1[offset] - pq2[offset] + pq3[offset];
                                if( s < 100 || sq < 20 )
                                    continue;
                            }

                            result = cvRunHaarClassifierCascade( cascade, cvPoint(ix,iy), 0 );   // run the (partial) cascade
                            if( result > 0 )
                            {
                                if( pass < npass - 1 )
                                    mask_row[ix] = 1;
                                else
                                {
                                    CvRect rect = cvRect(ix,iy,win_size.width,win_size.height);
#ifndef _OPENMP                                                // single-threaded: push into seq
                                    cvSeqPush( seq, &rect );
#else                                                          // OpenMP: push into the per-thread sequence
                                    cvSeqPush( seq_thread[omp_get_thread_num()], &rect );
#endif
                                }
                            }
                            if( result < 0 )
                                _xstep = 1;
                        }
                        else if( mask_row[ix] )                // later passes: only re-check positions that survived pass 0
                        {
                            int result = cvRunHaarClassifierCascade( cascade, cvPoint(ix,iy),
                                                                     stage_offset );
                            if( result > 0 )
                            {
                                if( pass == npass - 1 )        // last pass
                                {
                                    CvRect rect = cvRect(ix,iy,win_size.width,win_size.height);
#ifndef _OPENMP
                                    cvSeqPush( seq, &rect );
#else
                                    cvSeqPush( seq_thread[omp_get_thread_num()], &rect );
#endif
                                }
                            }
                            else
                                mask_row[ix] = 0;
                        }
                    }
                }
                stage_offset = cascade->hid_cascade->count;    // later passes start after the stages already checked
                cascade->hid_cascade->count = cascade->count;
            }
        }
    }

#ifdef _OPENMP
    // gather the per-thread results
    for( i = 0; i < max_threads; i++ )
    {
        CvSeq* s = seq_thread[i];
        int j, total = s->total;
        CvSeqBlock* b = s->first;
        for( j = 0; j < total; j += b->count, b = b->next )
            cvSeqPushMulti( seq, b->data, b->count );          // append the thread's rectangles to seq
    }
#endif

    if( min_neighbors != 0 )
    {
        // group retrieved rectangles in order to filter out noise
        int ncomp = cvSeqPartition( seq, 0, &idx_seq, is_equal, 0 );
        CV_CALL( comps = (CvAvgComp*)cvAlloc( (ncomp+1)*sizeof(comps[0])));
        memset( comps, 0, (ncomp+1)*sizeof(comps[0]));

        // count the number of neighbors in each group
        for( i = 0; i < seq->total; i++ )
        {
            CvRect r1 = *(CvRect*)cvGetSeqElem( seq, i );
            int idx = *(int*)cvGetSeqElem( idx_seq, i );
            assert( (unsigned)idx < (unsigned)ncomp );

            comps[idx].neighbors++;

            comps[idx].rect.x += r1.x;
            comps[idx].rect.y += r1.y;
            comps[idx].rect.width += r1.width;
            comps[idx].rect.height += r1.height;
        }

        // calculate the average bounding box of each group
        for( i = 0; i < ncomp; i++ )
        {
            int n = comps[i].neighbors;
            if( n >= min_neighbors )
            {
                CvAvgComp comp;
                comp.rect.x = (comps[i].rect.x*2 + n)/(2*n);
                comp.rect.y = (comps[i].rect.y*2 + n)/(2*n);
                comp.rect.width = (comps[i].rect.width*2 + n)/(2*n);
                comp.rect.height = (comps[i].rect.height*2 + n)/(2*n);
                comp.neighbors = comps[i].neighbors;

                cvSeqPush( seq2, &comp );                      // store the averaged rectangle in seq2
            }
        }

        // filter out small face rectangles inside large face rectangles
        for( i = 0; i < seq2->total; i++ )
        {
            CvAvgComp r1 = *(CvAvgComp*)cvGetSeqElem( seq2, i );
            int j, flag = 1;

            for( j = 0; j < seq2->total; j++ )
            {
                CvAvgComp r2 = *(CvAvgComp*)cvGetSeqElem( seq2, j );
                int distance = cvRound( r2.rect.width * 0.2 );

                if( i != j &&
                    r1.rect.x >= r2.rect.x - distance &&
                    r1.rect.y >= r2.rect.y - distance &&
                    r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance &&
                    r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance &&
                    (r2.neighbors > MAX( 3, r1.neighbors ) || r1.neighbors < 3) )
                {
                    flag = 0;
                    break;
                }
            }

            if( flag )
            {
                cvSeqPush( result_seq, &r1 );                  // keep r1 in the final result
            }
        }
    }

    __END__;
