版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/czl389/article/details/60325970 </div>
<link rel="stylesheet" href="https://csdnimg.cn/release/phoenix/template/css/ck_htmledit_views-3019150162.css">
<div id="content_views" class="markdown_views">
<!-- flowchart 箭头图标 勿删 -->
<svg xmlns="http://www.w3.org/2000/svg" style="display: none;">
<path stroke-linecap="round" d="M5,0 0,2.5 5,5z" id="raphael-marker-block" style="-webkit-tap-highlight-color: rgba(0, 0, 0, 0);"></path>
</svg>
<p>图像拼接首要步骤就是对齐。对齐就要找到两幅图像相对的位置关系。为了描述位置之间的变换关系,研究者引入了诸如平移,仿射,单应等变换模型。每个模型无所谓好坏,各有特定的适用范围。</p>
在齐次坐标系下,图像位置之间的关系,或者说同名点坐标之间关系,都可以用一个3×3的矩阵来表达。从平移到单应,这个变换矩阵的自由度逐步上升,灵活度增加,适用的场合变广,但也导致求解出来的变换矩阵不太准确和稳定,意思是容易拼飞。所以,能够用平移变换模型解决的问题,不见得使用单应变换矩阵更好。模型越紧,解越精确。
本篇博客使用单应变换模型,完成两幅图像的拼接。
单应矩阵的求解,按照“特征检测+特征描述+特征匹配+直接线性变换”的方法。
拼接对齐图像使用OpenCV里的warpPerspective()
函数。
代码实现:
Homography类
将H矩阵(单应矩阵)的求解封装进Homography
类中。
//Homography.h 类声明文件
#pragma once
# include "opencv2/core/core.hpp"
# include "opencv2/features2d/features2d.hpp"
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/imgproc/imgproc.hpp"
#include"opencv2/nonfree/nonfree.hpp"
#include"opencv2/calib3d/calib3d.hpp"
#include<iostream>
using namespace cv;
using namespace std;
// Estimates the homography between two images via the classic pipeline:
// feature detection -> description -> matching -> RANSAC model fit.
// All intermediate results are computed lazily and cached in members.
class Homography
{
private:
// Input image pair: img1 is the "query" image, img2 the "train" image.
Mat img1;
Mat img2;
// Pluggable OpenCV 2.x algorithm objects (defaults set in the constructors).
Ptr<FeatureDetector> detector;
Ptr<DescriptorExtractor> extractor;
Ptr<DescriptorMatcher> matcher;
// Keypoints and descriptors for each image, filled on demand.
vector<KeyPoint> keyPoints1;
vector<KeyPoint> keyPoints2;
Mat descriptors1;
Mat descriptors2;
// firstMatches: raw brute-force matches; matches: RANSAC-inlier subset.
vector<DMatch> firstMatches;
vector<DMatch> matches;
// Matched point coordinates extracted from the keypoints (same order as firstMatches).
vector<Point2f> selfPoints1;
vector<Point2f> selfPoints2;
// Per-match inlier mask written by cv::findHomography.
vector<uchar> inliers;
// Cached 3x3 homography mapping img1 coordinates onto img2.
Mat homography;
public:
Homography();
Homography(Mat img1, Mat img2) ;
// Swap in a different algorithm by its OpenCV factory name (e.g. "FAST", "SIFT").
void setFeatureDetector(string detectorName);
void setDescriptorExtractor(string descriptorName);
void setDescriptorMatcher(string matcherName);
// Lazy accessors: each triggers the required pipeline stage if not yet run.
vector<KeyPoint> getKeyPoints1();
vector<KeyPoint> getKeyPoints2();
Mat getDescriptors1();
Mat getDescriptors2();
vector<DMatch> getMatches();
// Shows the inlier matches in an imshow window named "drawMatches".
void drawMatches();
Mat getHomography();
~Homography();
private:
// Internal pipeline stages, each pulling in its prerequisites on demand.
void detectKeyPoints();
void computeDescriptors();
void match();
void matchesToSelfPoints();
void findHomography();
void matchesFilter();
};
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
//Homography.cpp 类实现文件
#include "Homography.h"
// Default constructor: SIFT (up to 800 keypoints) for both detection and
// description, plus a brute-force descriptor matcher.
Homography::Homography()
{
// cv::Ptr takes ownership of the raw allocation (OpenCV 2.x idiom).
detector = new SIFT(800);
// SIFT implements both FeatureDetector and DescriptorExtractor, so the
// same object serves both roles.
extractor = detector;
matcher = DescriptorMatcher::create("BruteForce");
}
// Constructs with the same defaults as Homography() and stores the image pair.
// The original used `new(this) Homography()` — placement new on an object whose
// construction is already in progress, which is undefined behavior.  Initialize
// the algorithm objects directly instead.
Homography::Homography(Mat img1, Mat img2)
{
    detector = new SIFT(800);      // cv::Ptr takes ownership (OpenCV 2.x idiom)
    extractor = detector;          // SIFT detects and describes
    matcher = DescriptorMatcher::create("BruteForce");
    this->img1 = img1;
    this->img2 = img2;
}
// Replaces the keypoint detector with one created from an OpenCV factory
// name such as "FAST", "SIFT" or "SURF".
void Homography::setFeatureDetector(string detectorName)
{
    Ptr<FeatureDetector> created = FeatureDetector::create(detectorName);
    detector = created;
}
// Replaces the descriptor extractor with one created from an OpenCV factory
// name such as "SIFT" or "BRIEF".
void Homography::setDescriptorExtractor(string descriptorName)
{
    Ptr<DescriptorExtractor> created = DescriptorExtractor::create(descriptorName);
    extractor = created;
}
// Replaces the matcher with one created from an OpenCV factory name such as
// "BruteForce" or "FlannBased".
void Homography::setDescriptorMatcher(string matcherName)
{
    Ptr<DescriptorMatcher> created = DescriptorMatcher::create(matcherName);
    matcher = created;
}
// Returns img1's keypoints, running detection on first use.
vector<KeyPoint> Homography::getKeyPoints1()
{
    if (keyPoints1.empty())
        detectKeyPoints();
    return keyPoints1;
}
// Returns img2's keypoints, running detection on first use.
vector<KeyPoint> Homography::getKeyPoints2()
{
    if (keyPoints2.empty())
        detectKeyPoints();
    return keyPoints2;
}
// Returns img1's descriptor matrix, computing descriptors on first use.
Mat Homography::getDescriptors1()
{
    if (descriptors1.empty())
        computeDescriptors();
    return descriptors1;
}
// Returns img2's descriptor matrix, computing descriptors on first use.
Mat Homography::getDescriptors2()
{
    if (descriptors2.empty())
        computeDescriptors();
    return descriptors2;
}
// Returns the RANSAC-filtered (inlier) matches, running the full pipeline
// on first use via matchesFilter().
vector<DMatch> Homography::getMatches()
{
    if (matches.empty())
        matchesFilter();
    return matches;
}
// Returns the cached 3x3 homography, estimating it on first use.
Mat Homography::getHomography()
{
    if (homography.empty())
        findHomography();
    return homography;
}
// Visualizes the inlier matches side by side in an imshow window.
void Homography::drawMatches()
{
    // Lazily run the pipeline so there is something to draw.
    if (matches.empty())
        matchesFilter();
    Mat matchImage;
    // 255 converts to Scalar(255,0,0,0): blue lines/keypoints in BGR.
    cv::drawMatches(img1, keyPoints1, img2, keyPoints2, matches, matchImage, 255, 255);
    imshow("drawMatches", matchImage);
}
void Homography::detectKeyPoints()
{
detector->detect(img1, keyPoints1, Mat());
detector->detect(img2, keyPoints2, Mat());
}
// Computes descriptors for both images, detecting keypoints first if needed.
void Homography::computeDescriptors()
{
    if (keyPoints1.empty() || keyPoints2.empty())
        detectKeyPoints();
    extractor->compute(img1, keyPoints1, descriptors1);
    extractor->compute(img2, keyPoints2, descriptors2);
}
void Homography::match()
{
if (descriptors1.data == NULL || descriptors2.data == NULL)
{
computeDescriptors();
}
matcher->match(descriptors1, descriptors2, firstMatches, Mat());
}
void Homography::matchesToSelfPoints()
{
for (vector<DMatch>::const_iterator it = firstMatches.begin(); it != firstMatches.end(); ++it)
{
selfPoints1.push_back(keyPoints1.at(it->queryIdx).pt);
selfPoints2.push_back(keyPoints2.at(it->trainIdx).pt);
}
}
void Homography::findHomography()
{
if (firstMatches.size()==0)
{
match();
}
if (selfPoints1.size()==0||selfPoints2.size()==0)
{
matchesToSelfPoints();
}
inliers=vector<uchar>(selfPoints1.size(),0);
homography = cv::findHomography(selfPoints1, selfPoints2, inliers, CV_FM_RANSAC, 1.0);
}
// Copies into `matches` only those entries of firstMatches flagged as RANSAC
// inliers.  The original guard only checked firstMatches; if matching had run
// without homography estimation, `inliers` would be empty and the paired
// iteration below would silently produce no matches (or walk mismatched
// ranges).  Require the mask to exist and agree in size with the matches.
void Homography::matchesFilter()
{
    if (firstMatches.size() == 0 || inliers.size() != firstMatches.size())
    {
        findHomography();  // fills both firstMatches and inliers
    }
    // inliers[i] corresponds to firstMatches[i]; keep only flagged entries.
    vector<DMatch>::const_iterator itM = firstMatches.begin();
    vector<uchar>::const_iterator itIn = inliers.begin();
    for (; itIn != inliers.end(); ++itIn, ++itM)
    {
        if (*itIn)
        {
            matches.push_back(*itM);
        }
    }
}
// Nothing to release explicitly: cv::Ptr members manage their own lifetime.
Homography::~Homography()
{
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- 108
- 109
- 110
- 111
- 112
- 113
- 114
- 115
- 116
- 117
- 118
- 119
- 120
- 121
- 122
- 123
- 124
- 125
- 126
- 127
- 128
- 129
- 130
- 131
- 132
- 133
- 134
- 135
- 136
- 137
- 138
- 139
- 140
- 141
- 142
- 143
- 144
- 145
- 146
- 147
- 148
- 149
- 150
- 151
- 152
- 153
- 154
- 155
- 156
- 157
- 158
- 159
- 160
- 161
- 162
- 163
- 164
- 165
- 166
- 167
- 168
- 169
- 170
- 171
- 172
- 173
- 174
- 175
- 176
Homography类使用说明
创建类对象,需要指定两幅输入图像:
Homography h12(img1,img2);
- 1
可以设定特征检测器、描述器,匹配器的类型。默认情况下,使用SIFT检测和描述特征,使用BruteForce算法匹配特征。获取更多可用的类型,可以参见OpenCV通用的程序接口。使用示例:
h12.setFeatureDetector("FAST");
h12.setDescriptorExtractor("SIFT");
h12.setDescriptorMatcher("BruteForce");
- 1
- 2
- 3
可以检查各种中间量,比如检测出的角点,描述子,匹配,以及画出匹配。使用例子如下:
//获取两幅图像的特征点
vector<KeyPoint> keyPoints1=h12.getKeyPoints1();
vector<KeyPoint> keyPoints2=h12.getKeyPoints2();
//获取描述子
Mat descriptors1=h12.getDescriptors1();
Mat descriptors2=h12.getDescriptors2();
//获取匹配
vector<DMatch> matches=h12.getMatches();
//画出带有匹配连接线的图像
h12.drawMatches();
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
可以直接获取计算出的单应矩阵:
Mat h=h12.getHomography();
- 1
使用warpPerspective()拼接
#include"Homography.h"
int main()
{
string imgPath1 = "trees_000.jpg";
string imgPath2 = "trees_001.jpg";
Mat img1 = imread(imgPath1, CV_LOAD_IMAGE_GRAYSCALE);
Mat img2 = imread(imgPath2, CV_LOAD_IMAGE_GRAYSCALE);
Homography homo12(img1,img2);
Mat h12 = homo12.getHomography();
Mat h21;
invert(h12, h21, DECOMP_LU);
Mat canvas;
Mat img1_color = imread(imgPath1, CV_LOAD_IMAGE_COLOR);
Mat img2_color = imread(imgPath2, CV_LOAD_IMAGE_COLOR);
warpPerspective(img2_color, canvas, h21, Size(img1.cols*2, img1.rows));
img1_color.copyTo(canvas(Range::all(), Range(0, img1.cols)));
imshow("canvas",canvas);
waitKey(0);
return 0;
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
trees_000.jpg
trees_001.jpg
canvas.jpg