图像拼接C++代码记录

一、介绍

   要实现图像拼接需要哪几步呢?简单来说有以下几步:

   Step1: 对每幅图进行特征点提取

   Step2: 对特征点进行匹配

   Step3: 进行图像配准

   Step4: 把图像拷贝到另一幅图像的特定位置

   Step5: 对重叠边界进行特殊处理(融合)

二、代码

    基于Surf的特征点检测

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include <iostream>  

using namespace cv;
using namespace std;

void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);

Rect CalculateRoI(Mat& imageTransform, Mat& image02);

// Projected positions of the four corners of the warped (right) image,
// expressed in the coordinate frame of the stitched result.
// Filled in by CalcCorners() after the homography is estimated.
typedef struct
{
	Point2f left_top;
	Point2f left_bottom;
	Point2f right_top;
	Point2f right_bottom;
}four_corners_t;

// Global corner storage: written by CalcCorners(), read by
// OptimizeSeam() and CalculateRoI().
four_corners_t corners;

void CalcCorners(const Mat& H, const Mat& src)
{
	// 左上角(0,0,1)
	double v2[] = { 0, 0, 1 };         
	double v1[3];
	Mat V2 = Mat(3, 1, CV_64FC1, v2);
	Mat V1 = Mat(3, 1, CV_64FC1, v1);
	V1 = H * V2;
	corners.left_top.x = v1[0] / v1[2];
	corners.left_top.y = v1[1] / v1[2];

	// 左下角(0,src.rows,1)
	v2[0] = 0;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2); 
	V1 = Mat(3, 1, CV_64FC1, v1);
	V1 = H * V2;
	corners.left_bottom.x = v1[0] / v1[2];
	corners.left_bottom.y = v1[1] / v1[2];

	// 右上角(src.cols,0,1)
	v2[0] = src.cols;
	v2[1] = 0;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2); 
	V1 = Mat(3, 1, CV_64FC1, v1); 
	V1 = H * V2;
	corners.right_top.x = v1[0] / v1[2];
	corners.right_top.y = v1[1] / v1[2];

	// 右下角(src.cols,src.rows,1)
	v2[0] = src.cols;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);
	V1 = Mat(3, 1, CV_64FC1, v1);
	V1 = H * V2;
	corners.right_bottom.x = v1[0] / v1[2];
	corners.right_bottom.y = v1[1] / v1[2];
}

int main()
{
	Mat image01 = imread("imgs//right01.png", 1);  //右图
	Mat image02 = imread("imgs//left01.png", 1);  //左图

	imshow("p1", image01);
	imshow("p2", image02);

	//灰度图转换  
	Mat gray1, gray2;
	cvtColor(image01, gray1, CV_RGB2GRAY);
	cvtColor(image02, gray2, CV_RGB2GRAY);

	//提取特征点    
	SurfFeatureDetector Detector(200);
	vector<KeyPoint> keyPoint1, keyPoint2;
	Detector.detect(gray1, keyPoint1);
	Detector.detect(gray2, keyPoint2);

	//特征点描述 
	SurfDescriptorExtractor Descriptor;
	Mat imageDesc1, imageDesc2;
	Descriptor.compute(gray1, keyPoint1, imageDesc1);
	Descriptor.compute(gray2, keyPoint2, imageDesc2);

	//特征点匹配
	FlannBasedMatcher matcher;
	vector<vector<DMatch> > matchePoints;
	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();
	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's algorithm,获取优秀匹配点
	vector<DMatch> GoodMatchePoints;
	for (int i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i][0].distance < 0.4 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}
	Mat first_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
	imshow("first_match", first_match);
	imwrite("first_match.jpg", first_match);

	vector<Point2f> imagePoints1, imagePoints2;
	for (int i = 0; i < GoodMatchePoints.size(); i++)
	{
		imagePoints2.push_back(keyPoint2[GoodMatchePoints[i].queryIdx].pt);
		imagePoints1.push_back(keyPoint1[GoodMatchePoints[i].trainIdx].pt);
	}

	// 获取图像1到图像2的投影映射矩阵,尺寸为3*3  
	Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
	for (int i = 0; i < homo.rows; i++)
	{
		cout << "投影映射矩阵第" << i << "行: "
			<< static_cast<unsigned>(homo.at<uchar>(i, 0)) << " "
			<< static_cast<unsigned>(homo.at<uchar>(i, 1)) << " "
			<< static_cast<unsigned>(homo.at<uchar>(i, 2)) << endl;
	}

	// 计算配准图的四个顶点坐标
	CalcCorners(homo, image01);
	cout << "left_top:    " << corners.left_top << endl;
	cout << "left_bottom: " << corners.left_bottom << endl;
	cout << "right_top:   " << corners.right_top << endl;
	cout << "right_bottom:" << corners.right_bottom << endl;

	// 图像配准  
	Mat imageTransform;
	warpPerspective(image01, imageTransform, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), image02.rows));
	imshow("trans1", imageTransform);
	imwrite("trans1.jpg", imageTransform);

	// 计算裁剪ROI
	Rect RoI = CalculateRoI(imageTransform, image02);

	// 创建拼接后的图,需提前计算图的大小
	int dst_width = imageTransform.cols;
	int dst_height = imageTransform.rows;

	// 直接拼接
	Mat dst(dst_height, dst_width, CV_8UC3);
	dst.setTo(0);
	imageTransform.copyTo(dst(Rect(0, 0, imageTransform.cols, imageTransform.rows)));
	image02.copyTo(dst(Rect(0, 0, image02.cols, image02.rows)));
	imshow("b_dst", dst);
	imwrite("b_dst.jpg", dst);

	// 拼接处融合+边缘裁剪处理
	OptimizeSeam(image02, imageTransform, dst);
	Mat dst_crop = dst(RoI);
	imshow("dst_crop", dst_crop);
	imwrite("dst_crop.jpg", dst_crop);

	waitKey();

	return 0;
}

// Feather-blends the overlap region between the left image (img1) and
// the warped right image (trans) into dst. All three Mats are CV_8UC3
// with the same row count; img1 occupies dst's left part, trans the
// whole canvas. The blend weight of img1 falls linearly from 1 at the
// left edge of the overlap to 0 at img1's right edge.
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
	int rows = img1.rows;
	int cols = img1.cols;
	// Nominal left edge of the overlap: where the warped image's left
	// border enters img1. A corner projected to negative x would make
	// this negative and produce out-of-range column indices below, so
	// clamp into [0, cols).
	int start = MIN(corners.left_top.x, corners.left_bottom.x);
	if (start < 0) start = 0;
	if (start >= cols) return;  // no overlap — nothing to blend
	double processWidth = cols - start;
	double alpha = 1;  // weight of img1's pixel
	for (int i = 0; i < rows; i++)
	{
		uchar* p = img1.ptr<uchar>(i);
		uchar* t = trans.ptr<uchar>(i);
		uchar* d = dst.ptr<uchar>(i);
		int label = 0;          // set once we hit the first non-black trans pixel
		int begin = start;      // per-row left edge of the actual overlap
		double wid = processWidth;
		for (int j = start; j < cols; j++)
		{
			if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
			{
				// trans has no pixel here (black border): copy img1 only.
				alpha = 1;
			}
			else
			{
				// First non-black trans pixel in this row: start the
				// per-row blend ramp from here.
				if (label == 0)
				{
					label = 1;
					begin = j;
					wid = cols - begin;
				}
				// img1's weight decreases with distance from the
				// overlap's left edge in this row.
				alpha = (wid - j + begin) / wid;
			}
			// Per-channel convex combination; results stay in [0, 255].
			d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
			d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
			d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);
		}
	}
}

// Computes a crop rectangle that removes the black (invalid) borders of
// the stitched panorama. It builds a mask of valid pixels (union of the
// warped image's projected quadrilateral and the left image's rectangle),
// inverts it, and shrinks the RoI based on the bounding boxes of the
// remaining invalid regions.
// NOTE(review): the shrink rules below are heuristics — they appear to
// assume at most one tall invalid strip on the right and horizontal
// strips at the top/bottom; verify against actual input geometry.
Rect CalculateRoI(Mat & imageTransform, Mat & image02)
{
	// Valid region of the warped image: the quadrilateral spanned by the
	// four projected corners (from the global `corners`).
	Mat maskROI = Mat::zeros(imageTransform.size(), CV_8UC1);
	vector<Point> poly;
	poly.push_back(corners.left_top);
	poly.push_back(corners.right_top);
	poly.push_back(corners.right_bottom);
	poly.push_back(corners.left_bottom);
	vector<vector<Point>> polys;
	polys.push_back(poly);
	fillPoly(maskROI, polys, Scalar(255));

	// Valid region of the whole panorama: warped quad OR the left
	// image's rectangle; then invert so contours below find the
	// INVALID (black) regions.
	Mat maskROI2 = Mat::zeros(imageTransform.size(), CV_8UC1);
	maskROI2(Rect(0, 0, image02.cols, image02.rows)) = 255;
	bitwise_or(maskROI2, maskROI, maskROI2);
	bitwise_not(maskROI2, maskROI2);

	vector<vector<Point>> conts1;
	findContours(maskROI2, conts1, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
	// Start from the full canvas and shrink per invalid region found.
	Rect RoI = Rect(0, 0, maskROI2.cols, maskROI2.rows);
	for (int i = 0; i < conts1.size(); i++)
	{
		Rect rect = boundingRect(conts1[i]);
		cout << rect.x << " " << rect.y << " " << rect.x + rect.width << " " << rect.y + rect.height << endl;
		if (rect.width < rect.height)
		{
			// Tall strip — presumably the black band on the right of the
			// warped image: cut the RoI just before it.
			RoI.width = rect.x - 1;
		}
		else if (rect.y <= 1)
		{
			// Wide strip touching the top edge: move the RoI's top below it.
			RoI.y = rect.height + rect.y + 1;
			RoI.height -= rect.height + rect.y + 1;
		}
		else
		{
			// Wide strip elsewhere (bottom): shorten the RoI by its height.
			RoI.height -= rect.height;
		}
	}

	return RoI;
}

    基于Sift的特征点检测

int main()
{
	Mat image01 = imread("1.png", 1);  // 右边
	Mat image02 = imread("2.png", 1);  // 左边
	imshow("p1", image01);
	imshow("p2", image02);

	Mat image1, image2;
	cvtColor(image01, image1, CV_RGB2GRAY);
	cvtColor(image02, image2, CV_RGB2GRAY);

	// 提取特征点    
	SiftFeatureDetector siftDetector(800);  // 海塞矩阵阈值,调整精度,值越大点越少,越精准 
	vector<KeyPoint> keyPoint1, keyPoint2;
	siftDetector.detect(image1, keyPoint1);
	siftDetector.detect(image2, keyPoint2);

	// 特征点描述 
	SiftDescriptorExtractor SiftDescriptor;
	Mat imageDesc1, imageDesc2;
	SiftDescriptor.compute(image1, keyPoint1, imageDesc1);
	SiftDescriptor.compute(image2, keyPoint2, imageDesc2);

	// 特征点匹配
	FlannBasedMatcher matcher;
	vector<vector<DMatch> > matchePoints;
	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();
	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's algorithm,获取优秀匹配点
	vector<DMatch> GoodMatchePoints;
	for (int i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}

	Mat first_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
	imshow("first_match ", first_match);
	imwrite("first_match.jpg", first_match);
    
    return 0;
}

    基于Orb的特征点检测

int main()
{
	Mat image01 = imread("1.png", 1);  //右图
	Mat image02 = imread("2.png", 1);  //左图
	imshow("p1", image01);
	imshow("p2", image02);

	Mat image1, image2;
	cvtColor(image01, image1, CV_RGB2GRAY);
	cvtColor(image02, image2, CV_RGB2GRAY);

	// 提取特征点    
	OrbFeatureDetector OrbDetector(3000);
	vector<KeyPoint> keyPoint1, keyPoint2;
	OrbDetector.detect(image1, keyPoint1);
	OrbDetector.detect(image2, keyPoint2);

	// 特征点描述  
	OrbDescriptorExtractor OrbDescriptor;
	Mat imageDesc1, imageDesc2;
	OrbDescriptor.compute(image1, keyPoint1, imageDesc1);
	OrbDescriptor.compute(image2, keyPoint2, imageDesc2);

	flann::Index flannIndex(imageDesc1, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);
	Mat macthIndex(imageDesc2.rows, 2, CV_32SC1), matchDistance(imageDesc2.rows, 2, CV_32FC1);
	flannIndex.knnSearch(imageDesc2, macthIndex, matchDistance, 2, flann::SearchParams());

	// Lowe's algorithm, 获取优秀匹配点
	vector<DMatch> GoodMatchePoints;
	for (int i = 0; i < matchDistance.rows; i++)
	{
		if (matchDistance.at<float>(i, 0) < 0.4 * matchDistance.at<float>(i, 1))
		{
			DMatch dmatches(i, macthIndex.at<int>(i, 0), matchDistance.at<float>(i, 0));
			GoodMatchePoints.push_back(dmatches);
		}
	}

	Mat first_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
	imshow("first_match ", first_match);
	imwrite("match.jpg", first_match);

    return 0;
}

    基于Fast的特征点检测

int main()
{
	Mat image01 = imread("1.png", 1);  // 右边
	Mat image02 = imread("2.png", 1);  // 左边
	imshow("p1", image01);
	imshow("p2", image02);

	// 灰度图转换  
	Mat image1, image2;
	cvtColor(image01, image1, CV_RGB2GRAY);
	cvtColor(image02, image2, CV_RGB2GRAY);

	// 提取特征点    
	FastFeatureDetector Detector(50);  // 阈值 
	vector<KeyPoint> keyPoint1, keyPoint2;
	Detector.detect(image1, keyPoint1);
	Detector.detect(image2, keyPoint2);

	// 特征点描述   
	SiftDescriptorExtractor Descriptor;
	Mat imageDesc1, imageDesc2;
	Descriptor.compute(image1, keyPoint1, imageDesc1);
	Descriptor.compute(image2, keyPoint2, imageDesc2);

	// 特征点匹配
	BruteForceMatcher< L2<float> > matcher;
	vector<vector<DMatch> > matchePoints;
	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();
	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's algorithm,获取优秀匹配点
	vector<DMatch> GoodMatchePoints;
	for (int i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}

	Mat first_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
	imshow("first_match ", first_match);
	imwrite("first_match.jpg", first_match);

    return 0;
}

基于Stitch的图像拼接

#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/stitching.hpp"
#include <iostream>

using namespace std;
using namespace cv;

// High-level stitching demo using cv::Stitcher, which handles feature
// detection, matching, warping, seam finding and blending internally.
int main()
{
	Mat img1 = imread("left1.png", cv::IMREAD_COLOR);
	Mat img2 = imread("right1.png", cv::IMREAD_COLOR);
	// imread returns an empty Mat on failure; feeding empty images to
	// Stitcher::stitch fails opaquely, so check up front.
	if (img1.empty() || img2.empty())
	{
		cout << "Can't read input images" << endl;
		return EXIT_FAILURE;
	}

	vector<Mat> imgs;
	imgs.push_back(img1);
	imgs.push_back(img2);

	Mat pano;
	Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::PANORAMA);
	Stitcher::Status status = stitcher->stitch(imgs, pano);
	if (status != Stitcher::OK)
	{
		cout << "Can't stitch images, error code = " << int(status) << endl;
		return EXIT_FAILURE;
	}

	string result_name = "result1.jpg";
	imwrite(result_name, pano);
	cout << "stitching completed successfully\n" << result_name << " saved!" << endl;
	return EXIT_SUCCESS;
}

很抱歉,我无法提供完整的C++代码。但是,根据引用[1]和[2]中提供的信息,我可以给出一个大致的思路来实现双鱼眼全景图像拼接C++代码。 首先,你需要使用OpenCV库来处理图像。你可以使用鱼眼摄像头的图像,然后将其转换为矩形展开图。这可以通过使用OpenCV的remap函数来实现,该函数可以根据预先计算好的坐标映射图将图像进行畸变校正。 接下来,你可以使用SIFT(尺度不变特征变换)算法或其他特征提取算法来找到图像中的配对点。这些配对点将用于后续的图像拼接。 然后,你需要对图像进行融合,以消除明显的光照差异。你可以尝试使用光照补偿算法来实现这一点。 最后,你可以使用图像金字塔来进行图像的缩放和卷积操作,以提高图像的质量和效果。 请注意,这只是一个大致的思路,实际的代码实现可能会更加复杂。你可能需要进一步研究和调整参数来获得最佳的拼接效果。同时,你还可以参考引用[3]中提供的论文和代码,以获取更多关于双鱼眼全景图像拼接的细节和优化方法。 希望这些信息对你有所帮助!

参考: [双鱼眼图像拼接](https://blog.csdn.net/hard_level/article/details/103854631)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值