Implementation of a SURF Registration-and-Differencing Algorithm


Reference blog: http://m.blog.csdn.net/qq_15947787/article/details/55260002

Introduction

An implementation of moving-target detection under a dynamic (moving) background: two frames are registered using SURF feature matching and a homography, then differenced to reveal the moving object.

Hardware configuration: Win7 x64 + i3-4170

Software configuration: VS2015 + OpenCV 2.4.9 (Release build)

Note: the OpenCV package used here is a Release build, so the program cannot be run under the debugger; in Debug mode it will simply hang.

The run below is a Release x86 build.

Source Code

#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>   
#include <windows.h>

using namespace cv;
using namespace std;

int main()
{
	//VideoCapture capture(0);
	//VideoCapture capture("G:\\TRACK\\Track\\surf\\demo00.avi");

	Mat image01, image02, imgdiff;
	// Test pair 1 (1920x1080):
	//image01 = imread("G:\\TRACK\\Track\\surf\\ASN\\asn001.jpg");
	//image02 = imread("G:\\TRACK\\Track\\surf\\ASN\\asn004.jpg");

	// Test pair 2 (500x400):
	image01 = imread("G:\\TRACK\\Track\\surf\\A005.mpg3702.jpg");
	image02 = imread("G:\\TRACK\\Track\\surf\\A005.mpg3703.jpg");
	while (true)
	{
		double time0 = static_cast<double>(getTickCount());	// start timing
		DWORD start_time = GetTickCount();		
		Mat image1, image2;
		cvtColor(image01, image1, CV_BGR2GRAY);	// imread loads BGR, so convert with CV_BGR2GRAY
		cvtColor(image02, image2, CV_BGR2GRAY);
		// Detect SURF feature points
		SurfFeatureDetector surfDetector(2500);  // Hessian threshold; a higher value keeps fewer points and runs faster
		vector<KeyPoint> keyPoint1, keyPoint2;
		surfDetector.detect(image1, keyPoint1);
		surfDetector.detect(image2, keyPoint2);
		// Compute descriptors for the keypoints, in preparation for the matching below
		SurfDescriptorExtractor SurfDescriptor;
		Mat imageDesc1, imageDesc2;
		SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
		SurfDescriptor.compute(image2, keyPoint2, imageDesc2);
		// Match the descriptors and keep the best pairs
		FlannBasedMatcher matcher;
		vector<DMatch> matchePoints;
		matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
		sort(matchePoints.begin(), matchePoints.end()); // sort matches by distance (DMatch compares by distance)

		// Keep the top 25% best-matching feature points
		vector<Point2f> imagePoints1, imagePoints2;
		for (int i = 0; i<(int)(matchePoints.size()*0.25); i++)
		{
			imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
			imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
		}
		// Estimate the 3x3 homography mapping image 1 onto image 2
		Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
		//cout << "Homography matrix:\n" << homo << endl << endl; // print the mapping matrix
		// Image registration
		Mat imageTransform1, imgpeizhun, imgerzhi;
		warpPerspective(image01, imageTransform1, homo, Size(image02.cols, image02.rows));
		//imshow("After perspective warp", imageTransform1);
		absdiff(image02, imageTransform1, imgpeizhun);
		//imshow("Registered difference", imgpeizhun);
		threshold(imgpeizhun, imgerzhi, 50, 255.0, CV_THRESH_BINARY);
		//imshow("Thresholded difference", imgerzhi);
		// Report the elapsed time
		time0 = ((double)getTickCount() - time0) / getTickFrequency();
		DWORD end_time = GetTickCount();
		cout << "The run time is:" << (end_time - start_time) << "ms!" << endl;
		cout << 1 / time0 << endl;
		Mat temp, image02temp;
		float m_BiLi = 0.9f;	// ratio used to discard detections near the image border
		image02temp = image02.clone();
		cvtColor(imgerzhi, temp, CV_BGR2GRAY);	// the thresholded difference is still 3-channel; convert to gray
		// Dilate and extract connected regions
		Mat se = getStructuringElement(MORPH_RECT, Size(5, 5));
		morphologyEx(temp, temp, MORPH_DILATE, se);
		vector<vector<Point>> contours;
		findContours(temp, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
		if (contours.size()<1)
		{
			continue;
		}

		for (int k = 0; k < contours.size(); k++)
		{
			Rect bomen = boundingRect(contours[k]);

			// Discard detections at the border, which are invalid regions introduced by the registration
			if (bomen.x > image02temp.cols * (1 - m_BiLi) && bomen.y > image02temp.rows * (1 - m_BiLi)
				&& bomen.x + bomen.width < image02temp.cols * m_BiLi && bomen.y + bomen.height < image02temp.rows * m_BiLi)
			{
				rectangle(image02temp, bomen, Scalar(255, 0, 255), 2, 8, 0);
			}

		}
		imshow("检测与跟踪", image02temp);
		imwrite("lenna_gray.jpg", image02temp);
		waitKey(20);
		system("pause");
	}
}
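
The commented-out VideoCapture lines at the top of main() suggest that the same pipeline is also meant to run on a video. The sketch below shows one possible way to do that; it is not part of the original program, and the helper registerAndDiff() as well as the reuse of the demo00.avi path are assumptions made here for illustration.

#include <iostream>
#include <algorithm>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>

using namespace cv;
using namespace std;

// Register 'prev' onto 'curr' with SURF + homography and return the
// thresholded absolute difference (the same steps as in the program above).
static Mat registerAndDiff(const Mat& prev, const Mat& curr)
{
	Mat gray1, gray2;
	cvtColor(prev, gray1, CV_BGR2GRAY);
	cvtColor(curr, gray2, CV_BGR2GRAY);

	SurfFeatureDetector detector(2500);              // Hessian threshold
	vector<KeyPoint> kp1, kp2;
	detector.detect(gray1, kp1);
	detector.detect(gray2, kp2);

	SurfDescriptorExtractor extractor;
	Mat desc1, desc2;
	extractor.compute(gray1, kp1, desc1);
	extractor.compute(gray2, kp2, desc2);

	FlannBasedMatcher matcher;
	vector<DMatch> matches;
	matcher.match(desc1, desc2, matches);
	sort(matches.begin(), matches.end());            // sort by descriptor distance

	vector<Point2f> pts1, pts2;
	for (int i = 0; i < (int)(matches.size() * 0.25); i++)
	{
		pts1.push_back(kp1[matches[i].queryIdx].pt);
		pts2.push_back(kp2[matches[i].trainIdx].pt);
	}
	if (pts1.size() < 4)                             // need at least 4 pairs for a homography
		return Mat::zeros(curr.size(), curr.type());

	Mat homo = findHomography(pts1, pts2, CV_RANSAC);
	Mat warped, diff, binary;
	warpPerspective(prev, warped, homo, curr.size());
	absdiff(curr, warped, diff);
	threshold(diff, binary, 50, 255.0, CV_THRESH_BINARY);
	return binary;
}

int main()
{
	VideoCapture capture("G:\\TRACK\\Track\\surf\\demo00.avi");  // path taken from the commented-out line above
	if (!capture.isOpened())
		return -1;

	Mat prev, curr;
	capture >> prev;                                 // first frame becomes the initial reference
	if (prev.empty())
		return -1;

	while (capture.read(curr))
	{
		Mat binary = registerAndDiff(prev, curr);
		imshow("registered difference", binary);
		curr.copyTo(prev);                           // the current frame becomes the next reference
		if (waitKey(20) >= 0)
			break;
	}
	return 0;
}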

Test Results

1. The runtime is not very satisfactory (a possible speed-up is sketched after the results below):
Registration and differencing of two 500x400 images: about 46 ms
Registration and differencing of two 1920x1080 images: about 600 ms
2. The detection result is not perfect either.
1920x1080:
asn001-asn004 are the test images.
td_1, td_2, td_3 are the registration-difference results for asn001 vs asn002, asn001 vs asn003, and asn001 vs asn004, respectively.


500x400:
A005.mpg3702 and A005.mpg3703 are the test images.
test_1 is the registration-difference result for A005.mpg3702 vs A005.mpg3703.
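
On point 1, one possible way to reduce the runtime (a sketch under assumptions, not something done in the original post) is to run SURF detection and matching on downscaled copies of the two images and then rescale the estimated homography back to full resolution before warping. The helper name estimateHomographyDownscaled and the 0.5 scale factor below are illustrative.

#include <algorithm>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>

using namespace cv;
using namespace std;

// Estimate the full-resolution homography img1 -> img2 from downscaled copies.
// s is the downscale factor applied to both images before feature detection.
static Mat estimateHomographyDownscaled(const Mat& img1, const Mat& img2, double s = 0.5)
{
	Mat small1, small2, gray1, gray2;
	resize(img1, small1, Size(), s, s);
	resize(img2, small2, Size(), s, s);
	cvtColor(small1, gray1, CV_BGR2GRAY);
	cvtColor(small2, gray2, CV_BGR2GRAY);

	SurfFeatureDetector detector(2500);
	vector<KeyPoint> kp1, kp2;
	detector.detect(gray1, kp1);
	detector.detect(gray2, kp2);

	SurfDescriptorExtractor extractor;
	Mat d1, d2;
	extractor.compute(gray1, kp1, d1);
	extractor.compute(gray2, kp2, d2);

	FlannBasedMatcher matcher;
	vector<DMatch> matches;
	matcher.match(d1, d2, matches);
	sort(matches.begin(), matches.end());

	vector<Point2f> p1, p2;
	for (int i = 0; i < (int)(matches.size() * 0.25); i++)
	{
		p1.push_back(kp1[matches[i].queryIdx].pt);
		p2.push_back(kp2[matches[i].trainIdx].pt);
	}
	if (p1.size() < 4)
		return Mat::eye(3, 3, CV_64F);               // fall back to the identity transform

	Mat Hs = findHomography(p1, p2, CV_RANSAC);      // homography between the small images

	// S maps full-resolution coordinates to downscaled coordinates (x_small = s * x_full),
	// so the full-resolution homography is S^-1 * Hs * S.
	Mat S = (Mat_<double>(3, 3) << s, 0, 0, 0, s, 0, 0, 0, 1);
	return S.inv() * Hs * S;
}

The returned matrix can then be passed to warpPerspective() exactly as in the main program; the differencing, thresholding, and contour steps stay at full resolution and are unchanged.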


