ORB、SIFT、SURF、手动配准VS程序以及配准后图像融合方法

#include <opencv2/highgui/highgui.hpp>
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>

using namespace cv;
using namespace std;

#if SURF
//SURF
int main()
{

	Mat image01 = imread("0007.jpg", 1);
	Mat image02 = imread("A07.jpg", 1);
	imshow("p2", image01);
	imshow("p1", image02);

	//灰度图转换  
	Mat image1, image2;
	cvtColor(image01, image1, CV_RGB2GRAY);
	cvtColor(image02, image2, CV_RGB2GRAY);


	//提取特征点    
	SurfFeatureDetector surfDetector(2000);  // 海塞矩阵阈值,在这里调整精度,值越大点越少,越精准 
	vector<KeyPoint> keyPoint1, keyPoint2;
	surfDetector.detect(image1, keyPoint1);
	surfDetector.detect(image2, keyPoint2);

	//特征点描述,为下边的特征点匹配做准备    
	SurfDescriptorExtractor SurfDescriptor;
	Mat imageDesc1, imageDesc2;
	SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
	SurfDescriptor.compute(image2, keyPoint2, imageDesc2);

	FlannBasedMatcher matcher;
	vector<vector<DMatch> > matchePoints;
	vector<DMatch> GoodMatchePoints;

	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();

	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's algorithm,获取优秀匹配点
	for (int i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i][0].distance < 0.9 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}

	Mat first_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
	imshow("first_match ", first_match);
	waitKey();
	return 0;
}
#elif 0
//sift
int main()
{

	Mat image01 = imread("0366.jpg", 1);
	Mat image02 = imread("img441.png", 1);
	imshow("p2", image01);
	imshow("p1", image02);

	//灰度图转换  
	Mat image1, image2;
	cvtColor(image01, image1, CV_RGB2GRAY);
	cvtColor(image02, image2, CV_RGB2GRAY);


	//提取特征点    
	SiftFeatureDetector siftDetector(800);  // 海塞矩阵阈值,在这里调整精度,值越大点越少,越精准 
	vector<KeyPoint> keyPoint1, keyPoint2;
	siftDetector.detect(image1, keyPoint1);
	siftDetector.detect(image2, keyPoint2);

	//特征点描述,为下边的特征点匹配做准备    
	SiftDescriptorExtractor SiftDescriptor;
	Mat imageDesc1, imageDesc2;
	SiftDescriptor.compute(image1, keyPoint1, imageDesc1);
	SiftDescriptor.compute(image2, keyPoint2, imageDesc2);

	FlannBasedMatcher matcher;
	vector<vector<DMatch> > matchePoints;
	vector<DMatch> GoodMatchePoints;

	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();

	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's algorithm,获取优秀匹配点
	for (int i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}

	Mat first_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
	imshow("first_match ", first_match);
	imwrite("first_match.jpg", first_match);
	waitKey();
	return 0;
}
#elif 0
//ORB
int main()
{

	Mat image01 = imread("0366.jpg", 1);
	Mat image02 = imread("img441.png", 1);
	imshow("p2", image01);
	imshow("p1", image02);

	//灰度图转换  
	Mat image1, image2;
	cvtColor(image01, image1, CV_RGB2GRAY);
	cvtColor(image02, image2, CV_RGB2GRAY);


	//提取特征点    
	OrbFeatureDetector OrbDetector(1000);  // 在这里调整精度,值越小点越少,越精准 
	vector<KeyPoint> keyPoint1, keyPoint2;
	OrbDetector.detect(image1, keyPoint1);
	OrbDetector.detect(image2, keyPoint2);

	//特征点描述,为下边的特征点匹配做准备    
	OrbDescriptorExtractor OrbDescriptor;
	Mat imageDesc1, imageDesc2;
	OrbDescriptor.compute(image1, keyPoint1, imageDesc1);
	OrbDescriptor.compute(image2, keyPoint2, imageDesc2);

	flann::Index flannIndex(imageDesc1, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);

	vector<DMatch> GoodMatchePoints;

	Mat macthIndex(imageDesc2.rows, 2, CV_32SC1), matchDistance(imageDesc2.rows, 2, CV_32FC1);
	flannIndex.knnSearch(imageDesc2, macthIndex, matchDistance, 2, flann::SearchParams());

	// Lowe's algorithm,获取优秀匹配点
	for (int i = 0; i < matchDistance.rows; i++)
	{
		if (matchDistance.at<float>(i,0) < 0.6 * matchDistance.at<float>(i, 1))
		{
			DMatch dmatches(i, macthIndex.at<int>(i, 0), matchDistance.at<float>(i, 0));
			GoodMatchePoints.push_back(dmatches);
		}
	}

	Mat first_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
	imshow("first_match ", first_match);
	imwrite("first_match.jpg", first_match);
	waitKey();
	return 0;
}
#elif 1
//手工标注配准
vector<Point2f> imagePoints1, imagePoints2;  // clicked control points: reference image / image to register
Mat ref_win, src_win;                        // display copies that the mouse callbacks draw markers on
int pcount = 0;                              // number of points clicked on the reference image so far

//Mat二进制文件写
// Serialize a Mat to a binary file: "CmMat" magic, then cols/rows/type
// as three ints, then the raw pixel data.
// Returns false when the matrix is empty or the file cannot be opened.
bool matWrite(string filename, Mat &M){
	// Check emptiness BEFORE opening: the original opened the file first
	// and leaked the FILE* when M was empty.
	if (M.empty())
		return false;
	// copyTo produces a continuous copy, so step * rows below matches the
	// header exactly even when M is a non-continuous ROI.
	Mat M_copy;
	M.copyTo(M_copy);
	FILE* file = fopen(filename.c_str(), "wb");
	if (file == NULL)
		return false;
	fwrite("CmMat", sizeof(char), 5, file);
	int headData[3] = { M_copy.cols, M_copy.rows, M_copy.type() };
	fwrite(headData, sizeof(int), 3, file);
	// Write the copy (the original wrote M.data with M.step, which can
	// disagree with the M_copy header for ROI matrices).
	fwrite(M_copy.data, sizeof(char), M_copy.step * M_copy.rows, file);
	fclose(file);
	return true;
}
//Mat二进制文件读

// Deserialize a Mat written by matWrite.
// Returns false on open failure, short read, or bad magic.
bool matRead(const string& filename, Mat& M){
	FILE* f = fopen(filename.c_str(), "rb");
	if (f == NULL)
		return false;
	char buf[8];
	size_t pre = fread(buf, sizeof(char), 5, f);
	if (pre != 5 || strncmp(buf, "CmMat", 5) != 0)	{
		printf("Invalidate CvMat data file %s\n", filename.c_str());
		fclose(f);  // the original leaked the handle on this path
		return false;
	}
	int headData[3]; // Width, height, type
	if (fread(headData, sizeof(int), 3, f) != 3){
		fclose(f);
		return false;
	}
	Mat M_copy(headData[1], headData[0], headData[2]);
	// Read into the freshly allocated buffer, NOT into M: the original
	// read into M.data, but M may be empty or the wrong size/type, which
	// corrupts memory (or dereferences a null data pointer).
	fread(M_copy.data, sizeof(char), M_copy.step * M_copy.rows, f);
	fclose(f);
	M_copy.copyTo(M);
	return true;
}


void on_mouse1(int event, int x, int y, int flags, void *ustc) 
{
	if (event == CV_EVENT_LBUTTONDOWN)
	{
		Point  p = Point(x, y);
		circle(ref_win, p, 1, Scalar(0, 0, 255), -1);
		imshow("底图", ref_win);
		imagePoints1.push_back(p);   
		cout << "底图: " << p << endl;
		pcount++;
		cout << "ponit num:" << pcount << endl;
	}
}

void on_mouse2(int event, int x, int y, int flags, void *ustc) 
{
	if (event == CV_EVENT_LBUTTONDOWN)
	{
		Point  p = Point(x, y);
		circle(src_win, p, 1, Scalar(0, 0, 255), -1);
		imshow("待配准图", src_win);
		imagePoints2.push_back(p);   
		cout << "待配准图: " << p << endl;
	}
}

int main()
{
	Mat ref = imread("A0001.jpg");  
	Mat src = imread("Bimg1.png"); 

	ref_win = ref.clone();
	src_win = src.clone();

	namedWindow("待配准图");
	namedWindow("底图");
	imshow("待配准图", src_win);
	imshow("底图", ref_win);
	setMouseCallback("待配准图", on_mouse2);
	setMouseCallback("底图", on_mouse1);

	waitKey();
	string str;
	printf("next?\n");
	cin >> str;


	//compute the mertix
	Mat homo = findHomography(imagePoints2, imagePoints1, CV_RANSAC);
	//matWrite("1JuZhen", homo);
	//Mat homo1;
	//matRead("1JuZhen", homo1);

	Mat imageTransform1;
	warpPerspective(src, imageTransform1, homo, Size(ref.cols, ref.rows));   //变换
	imshow("transform", imageTransform1);
	//imshow("基准图打点", ref_win);
	//imshow("待配准图打点", src_win);
	imshow("transform result", imageTransform1);

	imwrite("result.jpg", imageTransform1);
	//imwrite("src_p.jpg", src_win);
	//imwrite("ref_p.jpg", ref_win);

	waitKey();
	return 0;
}
#else
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);

// The four corners of the warped (registered) image in destination
// coordinates; filled in by CalcCorners() and used to size the canvas.
typedef struct
{
	Point2f left_top;
	Point2f left_bottom;
	Point2f right_top;
	Point2f right_bottom;
}four_corners_t;

four_corners_t corners;  // result of the most recent CalcCorners() call

void CalcCorners(const Mat& H, const Mat& src)
{
	double v2[] = { 0, 0, 1 };//左上角
	double v1[3];//变换后的坐标值
	Mat V2 = Mat(3, 1, CV_64FC1, v2);  //列向量
	Mat V1 = Mat(3, 1, CV_64FC1, v1);  //列向量

	V1 = H * V2;
	//左上角(0,0,1)
	cout << "V2: " << V2 << endl;
	cout << "V1: " << V1 << endl;
	corners.left_top.x = v1[0] / v1[2];
	corners.left_top.y = v1[1] / v1[2];

	//左下角(0,src.rows,1)
	v2[0] = 0;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //列向量
	V1 = Mat(3, 1, CV_64FC1, v1);  //列向量
	V1 = H * V2;
	corners.left_bottom.x = v1[0] / v1[2];
	corners.left_bottom.y = v1[1] / v1[2];

	//右上角(src.cols,0,1)
	v2[0] = src.cols;
	v2[1] = 0;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //列向量
	V1 = Mat(3, 1, CV_64FC1, v1);  //列向量
	V1 = H * V2;
	corners.right_top.x = v1[0] / v1[2];
	corners.right_top.y = v1[1] / v1[2];

	//右下角(src.cols,src.rows,1)
	v2[0] = src.cols;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //列向量
	V1 = Mat(3, 1, CV_64FC1, v1);  //列向量
	V1 = H * V2;
	corners.right_bottom.x = v1[0] / v1[2];
	corners.right_bottom.y = v1[1] / v1[2];

}

int main(int argc, char *argv[])
{
	Mat image01 = imread("img1.png", 1);    //右图
	Mat image02 = imread("img2.png", 1);    //左图
	imshow("p2", image01);
	imshow("p1", image02);

	//灰度图转换  
	Mat image1, image2;
	cvtColor(image01, image1, CV_RGB2GRAY);
	cvtColor(image02, image2, CV_RGB2GRAY);


	//提取特征点    
	OrbFeatureDetector  surfDetector(3000);
	vector<KeyPoint> keyPoint1, keyPoint2;
	surfDetector.detect(image1, keyPoint1);
	surfDetector.detect(image2, keyPoint2);

	//特征点描述,为下边的特征点匹配做准备    
	OrbDescriptorExtractor  SurfDescriptor;
	Mat imageDesc1, imageDesc2;
	SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
	SurfDescriptor.compute(image2, keyPoint2, imageDesc2);

	flann::Index flannIndex(imageDesc1, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);

	vector<DMatch> GoodMatchePoints;

	Mat macthIndex(imageDesc2.rows, 2, CV_32SC1), matchDistance(imageDesc2.rows, 2, CV_32FC1);
	flannIndex.knnSearch(imageDesc2, macthIndex, matchDistance, 2, flann::SearchParams());

	// Lowe's algorithm,获取优秀匹配点
	for (int i = 0; i < matchDistance.rows; i++)
	{
		if (matchDistance.at<float>(i, 0) < 0.4 * matchDistance.at<float>(i, 1))
		{
			DMatch dmatches(i, macthIndex.at<int>(i, 0), matchDistance.at<float>(i, 0));
			GoodMatchePoints.push_back(dmatches);
		}
	}

	Mat first_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
	imshow("first_match ", first_match);

	vector<Point2f> imagePoints1, imagePoints2;

	for (int i = 0; i<GoodMatchePoints.size(); i++)
	{
		imagePoints2.push_back(keyPoint2[GoodMatchePoints[i].queryIdx].pt);
		imagePoints1.push_back(keyPoint1[GoodMatchePoints[i].trainIdx].pt);
	}



	//获取图像1到图像2的投影映射矩阵 尺寸为3*3  
	Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
	也可以使用getPerspectiveTransform方法获得透视变换矩阵,不过要求只能有4个点,效果稍差  
	//Mat   homo=getPerspectiveTransform(imagePoints1,imagePoints2);  
	cout << "变换矩阵为:\n" << homo << endl << endl; //输出映射矩阵      

	//计算配准图的四个顶点坐标
	CalcCorners(homo, image01);
	cout << "left_top:" << corners.left_top << endl;
	cout << "left_bottom:" << corners.left_bottom << endl;
	cout << "right_top:" << corners.right_top << endl;
	cout << "right_bottom:" << corners.right_bottom << endl;

	//图像配准  
	Mat imageTransform1, imageTransform2;
	warpPerspective(image01, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), image02.rows));
	//warpPerspective(image01, imageTransform2, adjustMat*homo, Size(image02.cols*1.3, image02.rows*1.8));
	imshow("直接经过透视矩阵变换", imageTransform1);
	imwrite("trans1.jpg", imageTransform1);

	waitKey();

	return 0;
}
#endif

关于图像融合:可以使用MATLAB中的waveanalyzer工具箱进行小波分解融合,也可以使用如下程序进行融合,不过程序里面我采用的是高频和低频取平均融合的方法。

 

%Composite two images: blend image girl.bmp onto image scene.png
clear
%Read the first image, girl.bmp, normalising it to truecolor (RGB)
filename1='girl.bmp';
info=imfinfo(filename1);                %query the image file's metadata
width1=info.Width;
height1=info.Height;
if strcmp(info.ColorType,'grayscale')==1
    [A,MAP]=gray2ind(imread(filename1));%convert the grayscale image to an indexed image
    RGB1=ind2rgb(A,MAP);                %convert the indexed image to truecolor
end
if strcmp(info.ColorType,'indexed')==1
    [A,MAP]=imread(filename1);
    RGB1=ind2rgb(A,MAP);                %convert the indexed image to truecolor
end    
if strcmp(info.ColorType,'truecolor')==1
    RGB1=imread(filename1);
end
figure,imshow(RGB1);

%Read the second image, scene.png, normalising it to truecolor (RGB)
filename2='scene.png';      
info=imfinfo(filename2);                %query the image file's metadata
width2=info.Width;
height2=info.Height;
if strcmp(info.ColorType,'grayscale')==1
    [A,MAP]=gray2ind(imread(filename2));%convert the grayscale image to an indexed image
    RGB2=ind2rgb(A,MAP);                %convert the indexed image to truecolor
end
if strcmp(info.ColorType,'indexed')==1
    [A,MAP]=imread(filename2);
    RGB2=ind2rgb(A,MAP);                %convert the indexed image to truecolor
end    
if strcmp(info.ColorType,'truecolor')==1
    RGB2=imread(filename2);
end
figure,imshow(RGB2);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Seed fill to obtain the reference (silhouette mask) image
BW=~im2bw(RGB1);                        %binarize the image, then invert it
figure;imshow(BW);
%Use the top-left and top-right corners of the binary image as the seed
%points (4-connected) so that everything outside the figure's outline is
%set to 1, giving B; combining B with BW then isolates the silhouette.
B=imfill(imfill(BW,[1,1],4),[1,width1],4);
figure;imshow(B);
B=~xor(BW,B);                        %inverted XOR yields the silhouette reference image B
figure;imshow(B);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Alternative attempt: edge tracing followed by imfill hole-filling.
%It did not work well; the author later found the cause was that the
%bottom of the outer outline is not closed.
BW2=im2bw(RGB1); 
figure,imshow(BW2);
[m n]=size(BW2);
imgn=zeros(m,n);        %boundary-marker image
ed=[-1 -1;0 -1;1 -1;1 0;1 1;0 1;-1 1;-1 0]; %counter-clockwise neighbour offsets, starting from the top-left pixel
for i=2:m-1
    for j=2:n-1
        if BW2(i,j)==1 && imgn(i,j)==0      %current pixel is an unmarked white pixel
            if sum(sum(BW2(i-1:i+1,j-1:j+1)))~=9    %interior white pixels (fully white 3x3 block) are skipped
                ii=i;         %coordinates used while walking around the blob
                jj=j;
                imgn(i,j)=1;    %mark the first boundary pixel of this blob
                
                while imgn(ii,jj)~=1    %intended: stop after walking a full circle around the blob
                                        %NOTE(review): imgn(ii,jj) was just set to 1 above,
                                        %so this condition is false immediately and the walk
                                        %never runs as written -- confirm intended marker values
                    for k=1:8           %counter-clockwise 8-neighbourhood search
                        tmpi=ii+ed(k,1);        %candidate neighbour coordinates
                        tmpj=jj+ed(k,2);
                        %found a new boundary pixel that has not been visited yet
                        if BW2(tmpi,tmpj)==1 && imgn(tmpi,tmpj)~=1  
                           ii=tmpi;        %move the walk to the new pixel and continue
                           jj=tmpj;
                           imgn(ii,jj)=1;%mark this pixel in the boundary image
                            break;
                        end
                    end
                end
                
            end
        end
    end
end

figure;imshow(imgn)
%imgn=imgn>=1;
%NOTE(review): the result of imfill below is discarded (not assigned),
%so imgn is displayed unchanged; assign it if the fill is actually wanted
imfill(imgn,'hole')
figure,imshow(imgn);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%Pre-process the first image
A=im2double(RGB1);                    %figure image as double precision
D=zeros(height1,width1);              %marker matrix for foreground/background boundary points
K=3;                                  %horizontal neighbourhood width
for i=1:1:height1
    for j=1:1:width1
        if B(i,j)==0          %where the mask is 0, paint the first image's background blue
            A(i,j,1)=0;
            A(i,j,2)=0;
            A(i,j,3)=1.0;
        else                        %handle the foreground/background boundary in the first image
            for k=1:1:K
                N(k)=B(i,j+k-round(K/2));
            end
            if ~all(N)                %a background point exists among the horizontal neighbours
                if ~isempty(find(N==1))   
                    k1=1;k2=-1;
                    while 1           %replace this pixel's colour with a nearby interior (inside-silhouette) pixel
                        r=A(i,j+k1,1);g=A(i,j+k1,2);b=A(i,j+k1,3);
                        if B(i,j+k1)==1
                            break;
                        end
                        r=A(i,j+k2,1);g=A(i,j+k2,2);b=A(i,j+k2,3);
                        if B(i,j+k2)==1
                            break;
                        end
                        k1=k1+1;k2=k2-1;
                    end
                    A(i,j,1)=r;
                    A(i,j,2)=g;
                    A(i,j,3)=b;
                    D(i,j)=1;         %mark this as a foreground/background boundary point
                end       
            end
        end        
    end
end
figure;imshow(A);
x=0;y=0;                        %top-left offset at which the first image is blended into the second
RGB=im2double(RGB2);

%Blend the images
for i=1:1:height1
    for j=1:1:width1
        if B(i,j)==1   
            if D(i,j)==1             %boundary points: feather with a 50/50 average  
                RGB(y+i,x+j,1)=0.5*A(i,j,1)+0.5*RGB(y+i,x+j,1);
                RGB(y+i,x+j,2)=0.5*A(i,j,2)+0.5*RGB(y+i,x+j,2);
                RGB(y+i,x+j,3)=0.5*A(i,j,3)+0.5*RGB(y+i,x+j,3);
            else                     %interior foreground: copy the figure pixel directly
                RGB(y+i,x+j,1)=A(i,j,1);
                RGB(y+i,x+j,2)=A(i,j,2);
                RGB(y+i,x+j,3)=A(i,j,3);    
            end
        end
    end
end
figure,imshow(RGB);
clc;

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值