[Learning OpenCV] Medical image stitching and satellite image stitching




// Reference posts
// http://blog.csdn.net/xukaiwen_2016/article/details/53149794
// http://blog.csdn.net/dcrmg/article/details/52629856
// http://blog.csdn.net/czl389/article/details/60325970  (*)

Method 1

VS2010 + OpenCV 2.4.9
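The sample uses the nonfree (SIFT) and legacy modules, so those libraries have to be linked in addition to the usual ones. A hedged sketch of the VS2010 linker inputs, assuming the stock "249"-suffixed release binaries that ship with OpenCV 2.4.9 (adjust the names to your own build):

// Sketch: explicit linker inputs for this sample under VS2010 / OpenCV 2.4.9.
// Library names assume the standard prebuilt binaries; verify against your install.
#pragma comment(lib, "opencv_core249.lib")
#pragma comment(lib, "opencv_highgui249.lib")
#pragma comment(lib, "opencv_imgproc249.lib")
#pragma comment(lib, "opencv_features2d249.lib")
#pragma comment(lib, "opencv_flann249.lib")
#pragma comment(lib, "opencv_calib3d249.lib")
#pragma comment(lib, "opencv_nonfree249.lib")
#pragma comment(lib, "opencv_legacy249.lib")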

#include "highgui/highgui.hpp"  
#include "opencv2/nonfree/nonfree.hpp"  
#include "opencv2/legacy/legacy.hpp" 

using namespace cv;
using namespace std;  // vector, sort

// Map a point in the source image to its position in the target image under a 3x3 transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMatrix);
static void ShowHelpText();
int main(int argc,char *argv[])  
{  
	system("color 5F");  
	ShowHelpText();
	// Load the two inputs: from the command line if given, otherwise default files
	Mat image01, image02;
	if (argc < 2)  
	{  
		image01 = imread("left.jpg");  
		image02 = imread("right.jpg");  
	}  
	else  
	{  
		image01 = imread(argv[1]);  
		image02 = imread(argv[2]);  
	}  
	if (image01.empty() || image02.empty())
	{
		printf("Failed to load one or both input images.\n");
		return -1;  // bail out unless both images were read successfully
	}
	imshow("Input image 1", image01);
	imshow("Input image 2", image02);

	// Convert to grayscale for feature detection (imread loads BGR, hence CV_BGR2GRAY)
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);
	cvtColor(image02, image2, CV_BGR2GRAY);

	// Detect SIFT keypoints
	SiftFeatureDetector siftDetector(800);  // keep the 800 strongest features
	// (the original comment said "Hessian threshold", but that is SURF's parameter;
	// for SIFT this constructor argument is the number of features to retain)
	vector<KeyPoint> keyPoint1, keyPoint2;
	siftDetector.detect(image1, keyPoint1);
	siftDetector.detect(image2, keyPoint2);

	// Compute descriptors for the keypoints, in preparation for matching
	SiftDescriptorExtractor siftDescriptor;  
	Mat imageDesc1,imageDesc2;  
	siftDescriptor.compute(image1,keyPoint1,imageDesc1);  
	siftDescriptor.compute(image2,keyPoint2,imageDesc2);	

	// Match descriptors and keep the best pairs
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;
	matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
	sort(matchePoints.begin(), matchePoints.end());  // DMatch sorts by ascending distance
	// Keep the N best matches (here N = 10)
	vector<Point2f> imagePoints1,imagePoints2;
	for (int i = 0; i < 10 && i < (int)matchePoints.size(); i++)
	{		
		imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);		
		imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);		
	}
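
	// Hedged alternative: rather than taking the 10 smallest distances, Lowe's ratio
	// test via knnMatch is a common, more robust way to select correspondences:
	//   vector<vector<DMatch> > knn;
	//   matcher.knnMatch(imageDesc1, imageDesc2, knn, 2);
	//   keep knn[k][0] only when knn[k][0].distance < 0.6f * knn[k][1].distance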

	// Estimate the 3x3 homography mapping image 1 onto image 2
	Mat homo=findHomography(imagePoints1,imagePoints2,CV_RANSAC);
	// Pre-multiply by a translation of image01.cols pixels so the warped image 1
	// lands fully inside the output canvas instead of being clipped at x = 0
	Mat adjustMat=(Mat_<double>(3,3)<<1.0,0,image01.cols,0,1.0,0,0,0,1.0);
	Mat adjustHomo=adjustMat*homo;

	// Locate the strongest match in the warped image 1 and in image 2; these anchor the seam position
	Point2f originalLinkPoint,targetLinkPoint,basedImagePoint;
	originalLinkPoint=keyPoint1[matchePoints[0].queryIdx].pt;
	targetLinkPoint=getTransformPoint(originalLinkPoint,adjustHomo);
	basedImagePoint=keyPoint2[matchePoints[0].trainIdx].pt;

	// Warp image 1 into a canvas wide enough to hold both images (plus some headroom)
	Mat imageTransform1;
	warpPerspective(image01,imageTransform1,adjustMat*homo,Size(image02.cols+image01.cols+110,image02.rows));

	// Blend the overlap region to the left of the strongest match point so the seam
	// transitions smoothly instead of showing an abrupt change in brightness
	Mat image1Overlap,image2Overlap;  // overlapping parts of image 1 and image 2
	image1Overlap=imageTransform1(Rect(Point(targetLinkPoint.x-basedImagePoint.x,0),Point(targetLinkPoint.x,image02.rows)));
	image2Overlap=image02(Rect(0,0,image1Overlap.cols,image1Overlap.rows));
	Mat image1ROICopy=image1Overlap.clone();  // keep a copy of image 1's overlap region
	for(int i=0;i<image1Overlap.rows;i++)
	{
		for(int j=0;j<image1Overlap.cols;j++)
		{
			// blend weight ramps from 0 at the left edge of the overlap to 1 at the right
			double weight=(double)j/image1Overlap.cols;
			image1Overlap.at<Vec3b>(i,j)[0]=(1-weight)*image1ROICopy.at<Vec3b>(i,j)[0]+weight*image2Overlap.at<Vec3b>(i,j)[0];
			image1Overlap.at<Vec3b>(i,j)[1]=(1-weight)*image1ROICopy.at<Vec3b>(i,j)[1]+weight*image2Overlap.at<Vec3b>(i,j)[1];
			image1Overlap.at<Vec3b>(i,j)[2]=(1-weight)*image1ROICopy.at<Vec3b>(i,j)[2]+weight*image2Overlap.at<Vec3b>(i,j)[2];
		}
	}
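
	// The loop above is a linear feathering blend across the overlap:
	//   out(i,j) = (1 - w) * warped1(i,j) + w * image2(i,j),  with w = j / overlapWidth,
	// so the result fades from pure image 1 at the seam's left edge to pure image 2 at its right.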
	Mat ROIMat=image02(Rect(Point(image1Overlap.cols,0),Point(image02.cols,image02.rows)));  // non-overlapping part of image 2
	ROIMat.copyTo(Mat(imageTransform1,Rect(targetLinkPoint.x,0, ROIMat.cols,image02.rows)));  // append it directly after the blended seam
	namedWindow("Stitch result",0);
	imshow("Stitch result",imageTransform1);
	imwrite("D:\\stitch_result.jpg",imageTransform1);
	waitKey();  
	return 0;  
}

// Map a point in the source image to its position in the target image under a 3x3 transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMatrix)
{
	// Lift the point to homogeneous coordinates, apply the matrix, then divide by w
	Mat originalP,targetP;
	originalP=(Mat_<double>(3,1)<<originalPoint.x,originalPoint.y,1.0);
	targetP=transformMatrix*originalP;
	float x=targetP.at<double>(0,0)/targetP.at<double>(2,0);
	float y=targetP.at<double>(1,0)/targetP.at<double>(2,0);
	return Point2f(x,y);
}




static void ShowHelpText()
{
	// Print a greeting banner
	printf("\n\n\t\t\tThis program was tested by Su Xiaopeng of NCEPU.\n");
	printf("\n\n\t\t\tThanks to OpenCV for being open source; its appeal is endless.\n");
	printf("\n\n  ----------------------------------------------------------------------------\n");

	// Contact information
	printf("\n\n\n\tFeedback is welcome; the author's email is xiaopengsu@ncepu.edu.cn\n\n");
	printf("\n\n\n\tFeedback is welcome; the CSDN blog is http://blog.csdn.net/kyjl888\n\n");
}
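
Note that getTransformPoint re-implements something OpenCV already provides: cv::perspectiveTransform applies a 3x3 homography to a set of points, including the homogeneous divide. A minimal drop-in sketch using the names from the listing above:

	// Equivalent to getTransformPoint(originalLinkPoint, adjustHomo):
	vector<Point2f> srcPts(1, originalLinkPoint), dstPts;
	perspectiveTransform(srcPts, dstPts, adjustHomo);
	Point2f targetViaBuiltin = dstPts[0];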


Method 2

Method 2 repeats the whole of Method 1's pipeline (the original post reproduces the entire listing, commented out): grayscale conversion, SIFT detection and description, FLANN matching, homography estimation, and the same getTransformPoint helper. Only the final compositing step differs, so only that part is shown here:

	// Register image 1 onto the canvas (note the smaller +10 margin than Method 1 uses)
	Mat imageTransform1;
	warpPerspective(image01,imageTransform1,adjustMat*homo,Size(image02.cols+image01.cols+10,image02.rows));

	// Join the images at the strongest match point: image 1 on the left, image 2 on the
	// right. Replacing pixels outright like this leaves a hard seam, so any exposure
	// difference between the inputs shows up as an abrupt change in brightness.
	Mat ROIMat=image02(Rect(Point(basedImagePoint.x,0),Point(image02.cols,image02.rows)));
	ROIMat.copyTo(Mat(imageTransform1,Rect(targetLinkPoint.x,0,image02.cols-basedImagePoint.x+1,image02.rows)));

	namedWindow("Stitch result",0);
	imshow("Stitch result",imageTransform1);
	waitKey();
	return 0;
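
For comparison, OpenCV 2.4 also ships a high-level stitching pipeline (the opencv_stitching module) that performs feature matching, warping, seam finding, and multi-band blending internally. This is not the method used above, just the library's packaged alternative; a minimal sketch, assuming opencv_stitching249 is linked:

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/stitcher.hpp"
#include <cstdio>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
	vector<Mat> imgs;
	imgs.push_back(imread("left.jpg"));
	imgs.push_back(imread("right.jpg"));

	Mat pano;
	Stitcher stitcher = Stitcher::createDefault(false);  // false: do not try the GPU path
	if (stitcher.stitch(imgs, pano) != Stitcher::OK)
	{
		printf("Stitching failed.\n");
		return -1;
	}
	imshow("Stitcher result", pano);
	waitKey();
	return 0;
}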
