OpenCV的demo不够精确的处理

 在网上下载的许多OpenCV demo做图像相似度处理,效果总是不尽人意。哪怕把自定义颜色匹配的各种阈值设置成easy模式,匹配结果仍不理想。

这里所说的图片相似度判断demo,是先进行四种直方图指标的判断,再进行边缘匹配判断,以确定两图是否匹配。这四种指标分别是:1. 相关度;2. 卡方;3. 直方图相交值;4. Bhattacharyya距离。

但是哪怕进行了这些判断,加边缘匹配后,常常会有明显区别却判断成YES。可谓是精度太低。


随后我们进行了思考。如何处理这种问题。废话不多说,直接说答案,我们将要对比的两张图片分解成多个区域。将每个区域裁剪成一个个小图片。2张图片的对应区域进行上述对比,当每个区域的图片都判断为YES时,我们基本可以确定这是两张相似度很高的图片。



#import "ViewController.h"

#import "CameraImageHelper.h"

#import "Utility.h"


// Custom color-match thresholds ("hard" level). Correlation and intersection
// are lower bounds (bigger = more similar); chi-square and Bhattacharyya are
// upper bounds (smaller = more similar). See prepareimage: for usage.

#define F_CORREL_THRESH_HARD                0.65f

#define F_CORREL_INTERSECT_HARD             0.55f

#define F_CORREL_CHISQR_HARD                15.0f

#define F_CORREL_BHATTACHARYYA_HARD         0.4f

// Alternative threshold sets kept for tuning (looser "hard", "normal", "easy"):

//#define F_CORREL_THRESH_HARD                0.45f

//#define F_CORREL_INTERSECT_HARD             0.40f

//#define F_CORREL_CHISQR_HARD                90.0f

//#define F_CORREL_BHATTACHARYYA_HARD         0.5f


//#define F_CORREL_THRESH_NORMAL              0.55f

//#define F_CORREL_INTERSECT_NORMAL           0.45f

//#define F_CORREL_CHISQR_NORMAL              50.0f

//#define F_CORREL_BHATTACHARYYA_NORMAL       0.45f

//

//#define F_CORREL_THRESH_EASY                0.45f

//#define F_CORREL_INTERSECT_EASY             0.40f

//#define F_CORREL_CHISQR_EASY                90.0f

//#define F_CORREL_BHATTACHARYYA_EASY         0.50f


@interface ViewController () <UINavigationControllerDelegate, UIImagePickerControllerDelegate>

// Left/right preview image views and the similarity result label.
// (Fix: the original property declarations were missing the closing
// parenthesis of the attribute list and did not compile.)
@property (strong, nonatomic) UIImageView *img1;
@property (strong, nonatomic) UIImageView *img2;
@property (strong, nonatomic) UILabel *lab1;
@property (strong, nonatomic) UIButton *chooseBtn;

- (IBAction)ChooseImage:(id)sender;

// Optional repeating timer for live comparison (see startCamera).
@property (nonatomic, readonly) NSTimer *m_timer;

// OpenCV C-level resources for the reference image; these are plain C
// pointers and must be released manually with cvReleaseImage/cvReleaseHist.
@property (nonatomic, readonly) IplImage *m1_IplImage;
@property (nonatomic, readonly) IplImage *m2_IplImage;
@property (nonatomic, readonly) CvHistogram *m1_CvHistogram;
@property (nonatomic, readonly) CvHistogram *m2_CvHistogram;

// Captured camera frames: `image` is the reference, `image1` the candidate.
@property (strong, nonatomic) UIImage *image;
@property (strong, nonatomic) UIImage *image1;
@property (strong, nonatomic) UIImagePickerController *picker;

// Live camera preview view supplied by CameraImageHelper.
@property (nonatomic, readonly) UIView *m_viewCamera;

@end


@implementation ViewController

// Explicit synthesis backs the readonly properties with ivars so the
// implementation can assign them directly.
@synthesize m_viewCamera =_viewCamera;

@synthesize m_timer =_timer;

// Builds the UI: two image previews, a result label, a capture button,
// then starts the live camera preview.
// (Fix: restored the spaces the paste had stripped from every message send,
// e.g. `[superviewDidLoad]` -> `[super viewDidLoad]`.)
- (void)viewDidLoad
{
    [super viewDidLoad];

    // Left preview: the reference photo.
    _img1 = [[UIImageView alloc] init];
    _img1.frame = CGRectMake(20, 20, 192, 256);
    [self.view addSubview:_img1];

    // Right preview: the candidate photo.
    _img2 = [[UIImageView alloc] init];
    _img2.frame = CGRectMake(215, 20, 192, 256);
    [self.view addSubview:_img2];

    // Label that shows the similarity verdict.
    _lab1 = [[UILabel alloc] init];
    _lab1.frame = CGRectMake(0, 276, 200, 40);
    [self.view addSubview:_lab1];

    // Capture button; each tap grabs the current camera frame (choose1:).
    _chooseBtn = [UIButton buttonWithType:UIButtonTypeRoundedRect];
    _chooseBtn.frame = CGRectMake(415, 20, 200, 40);
    [_chooseBtn setTitle:@"拍照" forState:UIControlStateNormal];
    [_chooseBtn addTarget:self
                   action:@selector(choose1:)
         forControlEvents:UIControlEventTouchUpInside];
    [self.view addSubview:_chooseBtn];

    [self startCamera];
}

// Starts the capture session and attaches the live preview view.
- (void)startCamera
{
    [CameraImageHelper startRunning];
    _viewCamera = [CameraImageHelper previewWithBounds:CGRectMake(0, 220, 384, 1024 / 2)];
    [self.view addSubview:_viewCamera];

    // Optional real-time comparison: sample a frame every 0.7 s.
    // NOTE(review): a repeating NSTimer retains its target, so if this is
    // re-enabled the timer must be invalidated before the controller is
    // torn down, or dealloc will never run.
//    _timer = [NSTimer scheduledTimerWithTimeInterval:0.7f
//                                              target:self
//                                            selector:@selector(timerMethod:)
//                                            userInfo:nil
//                                             repeats:YES];
}

// Timer callback: re-runs the histogram comparison on the latest frames.
- (void)timerMethod:(NSTimer *)paramSender

{

    [self compareImgHist];

}

// Compares the two captured images and shows the verdict in the label.
// Simplification: the original looped over a one-element array while always
// comparing _image against _image1 on every iteration, so the loop was
// equivalent to a single call.
- (void)compareImgHist
{
    BOOL flag = [self Lookimage:_image :_image1];
    _lab1.text = flag ? @"相似度:Yes" : @"相似度:NO";
}

#pragma mark - Compare two images

// Returns YES when image1 and image2 are judged similar.
// Strategy: compare the whole images first; if that passes, split both into
// a 2x2 grid and require every corresponding quadrant to match as well,
// which greatly reduces false positives from global-histogram matching.
// Both images are assumed to be the same size; otherwise they should be
// scaled to a common size before calling this.
- (BOOL)Lookimage:(UIImage *)image1 :(UIImage *)image2
{
    // Whole-image comparison: a NO here short-circuits everything.
    if (![self prepareimage:image1 :image2]) {
        return NO;
    }

    CGFloat width = image1.size.width;
    CGFloat height = image1.size.height;
    CGSize quadrantSize = CGSizeMake(width / 2, height / 2);

    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
            // Origin of quadrant (i, j). Since i and j are 0 or 1, the
            // multiplication also yields 0 for the first row/column (the
            // original special-cased i==0 / j==0 redundantly).
            CGRect rect = CGRectMake(width / 2 * i,
                                     height / 2 * j,
                                     quadrantSize.width,
                                     quadrantSize.height);

            UIImage *sub1 = [self getImageFromImage:image1
                                       subImageSize:quadrantSize
                                       subImageRect:rect];
            UIImage *sub2 = [self getImageFromImage:image2
                                       subImageSize:quadrantSize
                                       subImageRect:rect];

            // A single mismatching quadrant is enough to reject the pair.
            if (![self prepareimage:sub1 :sub2]) {
                return NO;
            }
        }
    }
    return YES;
}

#pragma mark - Pairwise comparison (histogram + edges)

// Core comparison of one image pair.
// 1) HSV-histogram match on four metrics (correlation, intersection,
//    chi-square, Bhattacharyya) against the "hard" thresholds.
// 2) If the color match passes, a Canny edge / cvMatchShapes check.
// Returns YES only when all checks pass. All OpenCV C resources created
// here are released before returning.
- (BOOL)prepareimage:(UIImage *)image1 :(UIImage *)image2
{
    IplImage *m1_IplImage = [Utility CreateIplImageFromUIImage:image1];
    CvHistogram *m1_CvHistogram = [Utility getHSVHist:m1_IplImage];
    cvNormalizeHist(m1_CvHistogram, 1);

    IplImage *m2_IplImage = [Utility CreateIplImageFromUIImage:image2];
    CvHistogram *m2_CvHistogram = [Utility getHSVHist:m2_IplImage];
    cvNormalizeHist(m2_CvHistogram, 1);

    // Color-match thresholds. (Fix: the original comments paired the wrong
    // metric name with each threshold; labels now match the variables.)
    CGFloat fCorrelThreshold = F_CORREL_THRESH_HARD;               // CV_COMP_CORREL
    CGFloat fIntersectThreshold = F_CORREL_INTERSECT_HARD;         // CV_COMP_INTERSECT
    CGFloat fChisQRThreshold = F_CORREL_CHISQR_HARD;               // CV_COMP_CHISQR
    CGFloat fBhattacharyyaThreshold = F_CORREL_BHATTACHARYYA_HARD; // CV_COMP_BHATTACHARYYA

    double com1 = cvCompareHist(m1_CvHistogram, m2_CvHistogram, CV_COMP_CORREL);
    double com2 = cvCompareHist(m1_CvHistogram, m2_CvHistogram, CV_COMP_INTERSECT);
    double com3 = cvCompareHist(m1_CvHistogram, m2_CvHistogram, CV_COMP_CHISQR);
    double com4 = cvCompareHist(m1_CvHistogram, m2_CvHistogram, CV_COMP_BHATTACHARYYA);
    NSLog(@"com1=%f,com2=%f,com3=%f,com4=%f", com1, com2, com3, com4);

    // Correlation/intersection: higher means more similar (lower bounds).
    // Chi-square/Bhattacharyya: lower means more similar (upper bounds).
    BOOL look_flag = ((com1 > fCorrelThreshold) &&
                      (com2 > fIntersectThreshold) &&
                      (com3 < fChisQRThreshold) &&
                      (com4 < fBhattacharyyaThreshold));

    if (look_flag) {
        // Edge match on Canny contours.
        IplImage *mode = m1_IplImage;
        IplImage *test = m2_IplImage;

        // (Fix: the test-side buffers were created with mode->depth; use
        // each source image's own depth.)
        IplImage *bw_mode = cvCreateImage(cvGetSize(mode), mode->depth, 1);
        IplImage *bw_test = cvCreateImage(cvGetSize(test), test->depth, 1);
        IplImage *canny_mode = cvCreateImage(cvGetSize(mode), mode->depth, 1);
        IplImage *canny_test = cvCreateImage(cvGetSize(test), test->depth, 1);

        cvCvtColor(mode, bw_mode, CV_RGB2GRAY);
        cvCvtColor(test, bw_test, CV_RGB2GRAY);

        // Model and test contours.
        cvCanny(bw_mode, canny_mode, 50, 60, 3);
        cvCanny(bw_test, canny_test, 50, 60, 3);

        // cvMatchShapes returns 0 for identical shapes; reject at >= 0.04.
        double matching = cvMatchShapes(canny_test, canny_mode, CV_CONTOURS_MATCH_I3, 0);
        NSLog(@"matching =%f", matching);
        if (matching >= 0.04) {
            look_flag = NO;
        }

        cvReleaseImage(&bw_mode);
        cvReleaseImage(&bw_test);
        cvReleaseImage(&canny_mode);
        cvReleaseImage(&canny_test);
    }

    // Release the C-level OpenCV resources on every path.
    cvReleaseImage(&m1_IplImage);
    cvReleaseImage(&m2_IplImage);
    cvReleaseHist(&m1_CvHistogram);
    cvReleaseHist(&m2_CvHistogram);
    return look_flag;
}

#pragma mark - Crop image

// Returns the portion of superImage inside subImageRect.
// Fixes: the CGImageRef from CGImageCreateWithImageInRect was never
// released (a leak on every quadrant comparison), and a graphics context
// was opened and drawn into but its output was never used — the returned
// image always came straight from the cropped CGImage.
- (UIImage *)getImageFromImage:(UIImage *)superImage subImageSize:(CGSize)subImageSize subImageRect:(CGRect)subImageRect {
    CGImageRef subImageRef = CGImageCreateWithImageInRect(superImage.CGImage, subImageRect);
    UIImage *returnImage = [UIImage imageWithCGImage:subImageRef];
    CGImageRelease(subImageRef);
    return returnImage;
}

- (void)didReceiveMemoryWarning
{
    // (Fix: `[superdidReceiveMemoryWarning]` was missing the space and
    // did not compile.)
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}



// Capture-button handler. Odd-numbered taps store the current camera frame
// as the reference image (left preview); even-numbered taps store it as the
// candidate (right preview). From the second tap onward each capture
// triggers a comparison.
- (void)choose1:(UIButton *)btn
{
    static int number = 1;
    if (number % 2 == 1) {
        _image = [[CameraImageHelper image] copy];  // current camera frame
        NSLog(@"image=%@", _image);
        _img1.image = _image;
    } else {
        _image1 = [[CameraImageHelper image] copy];  // current camera frame
        NSLog(@"image=%@", _image1);
        _img2.image = _image1;
    }
    if (number >= 2) {
        [self compareImgHist];
    }
    number++;
}

#pragma mark - picker delegate

// Dismisses the picker and hands the chosen photo to saveImage:.
// afterDelay:0 defers the call to the next run-loop pass, letting the
// dismissal animation start first.
- (void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info
{
    [picker dismissViewControllerAnimated:YES completion:^{}];

    UIImage *image = [info objectForKey:UIImagePickerControllerOriginalImage];

    [self performSelector:@selector(saveImage:)
               withObject:image
               afterDelay:0.0f];
}


// Dismisses the picker when the user cancels without choosing a photo.
- (void)imagePickerControllerDidCancel:(UIImagePickerController *)picker
{
    [picker dismissViewControllerAnimated:YES completion:^{}];
}


// Processes a picked photo into the reference image:
// 1) central crop keeping the middle 3/4 of each dimension,
// 2) scale to a fixed 320 pt width preserving aspect ratio,
// 3) store it in self.image and precompute its HSV histogram.
// Fixes: fused `case` keywords and message sends that did not compile; a
// dead graphics context whose output was never used; previously-computed
// _m1_IplImage/_m1_CvHistogram were overwritten without release (leak).
- (void)saveImage:(UIImage *)image
{
    CGFloat fSmallImgWidth = 320.0f;

    // Left/right orientations swap the roles of width and height below.
    BOOL bIsRotate = NO;
    switch (image.imageOrientation) {
        case UIImageOrientationUp:
        case UIImageOrientationDown:
        case UIImageOrientationUpMirrored:
        case UIImageOrientationDownMirrored:
            bIsRotate = NO;
            break;
        case UIImageOrientationLeft:
        case UIImageOrientationRight:
        case UIImageOrientationLeftMirrored:
        case UIImageOrientationRightMirrored:
            bIsRotate = YES;
            break;
        default:
            break;
    }

    // Central crop: trim 1/8 from every edge (keep the middle 3/4).
    CGRect rcSubRect;
    rcSubRect.origin.x = !bIsRotate ? image.size.width / 8 : image.size.height / 8;
    rcSubRect.origin.y = !bIsRotate ? image.size.height / 8 : image.size.width / 8;
    rcSubRect.size.width = !bIsRotate ? (image.size.width - image.size.width / 4)
                                      : (image.size.height - image.size.height / 4);
    rcSubRect.size.height = !bIsRotate ? (image.size.height - image.size.height / 4)
                                       : (image.size.width - image.size.width / 4);

    CGImageRef subImageRef = CGImageCreateWithImageInRect(image.CGImage, rcSubRect);
    CGRect smallBounds = CGRectMake(0, 0, CGImageGetWidth(subImageRef), CGImageGetHeight(subImageRef));

    // Wrap the cropped CGImage, preserving the original orientation.
    // (The original also opened an image context here and drew into it, but
    // never read it back; that dead context has been removed.)
    UIImage *smallImage = [UIImage imageWithCGImage:subImageRef
                                              scale:1.0f
                                        orientation:image.imageOrientation];

    // Scale down to a fixed 320 pt width, preserving aspect ratio.
    CGFloat objectWidth = !bIsRotate ? smallBounds.size.width : smallBounds.size.height;
    CGFloat objectHeight = !bIsRotate ? smallBounds.size.height : smallBounds.size.width;
    CGFloat scaledHeight = floorf(objectHeight / (objectWidth / fSmallImgWidth));
    CGSize newSize = CGSizeMake(fSmallImgWidth, scaledHeight);
    UIGraphicsBeginImageContext(newSize);
    [smallImage drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    // Keep the processed photo as the reference image.
    self.image = [newImage copy];

    // Recompute the reference histogram, releasing any previous OpenCV
    // resources first so repeated picks do not leak.
    if (_m1_CvHistogram) {
        cvReleaseHist(&_m1_CvHistogram);
    }
    if (_m1_IplImage) {
        cvReleaseImage(&_m1_IplImage);
    }
    _m1_IplImage = [Utility CreateIplImageFromUIImage:self.image];
    _m1_CvHistogram = [Utility getHSVHist:_m1_IplImage];
    cvNormalizeHist(_m1_CvHistogram, 1);

    CGImageRelease(subImageRef);
}


@end



  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值