Table of Contents
1) Insert keyframes into the map (KeyFrame Insertion)
2) Cull MapPoints that do not meet requirements (Recent MapPoints Culling)
3) Create new MapPoints (New Points Creation)
4) Local BA optimization (Local BA)
5) Cull keyframes (Local KeyFrames Culling)
6) Fuse redundant MapPoints
1. Code Framework of the LocalMapping Thread
Let's start, as usual, with the ORB-SLAM2 system architecture diagram. My habit when studying a system is to keep its framework diagram in mind at all times and to read the code side by side with the diagram.
LocalMapping, as the name suggests, maintains the local map. From the framework diagram you can see that the LocalMapping thread links the Tracking thread and the LoopClosing thread through keyframes: keyframes generated by the Tracking thread are processed in the LocalMapping thread and then handed on to the LoopClosing thread.
The LocalMapping thread has five main tasks:
1) Insert keyframes into the map (KeyFrame Insertion).
2) Cull recently added MapPoints (Recent MapPoints Culling).
3) Create new MapPoints (New Points Creation).
4) Local BA optimization (Local BA).
5) Cull redundant keyframes (Local KeyFrames Culling).
Let's go through each step together with the code.
2. Walking Through the LocalMapping Thread Code
To read the LocalMapping code, start from the thread's entry point: the Run() function in LocalMapping.cc, which is effectively LocalMapping's main function. While the system is running, this thread is normally asleep or waiting. When a new keyframe arrives, the thread marks itself as busy (telling the Tracking thread: I'm busy, no new keyframes for now) and processes the keyframe immediately; once a keyframe has been processed, it marks itself as free again (telling the Tracking thread it can accept the next keyframe) and sleeps for 3 milliseconds (usleep(3000) is 3000 microseconds, i.e. 3 ms, not 3 seconds). The code is as follows:
void LocalMapping::Run()
{
mbFinished = false;
while(1)
{
// Tracking will see that Local Mapping is busy
// Once this is set to false, the Tracking thread sees via AcceptKeyFrames() that LocalMapping is busy
SetAcceptKeyFrames(false);
// Check if there are keyframes in the queue
if(CheckNewKeyFrames())
{
// BoW conversion and insertion in Map
ProcessNewKeyFrame(); //corresponds to 1. KeyFrame Insertion in the ORB-SLAM2 framework diagram
// Check recent MapPoints
//Cull recently added MapPoints of poor quality
MapPointCulling(); //corresponds to 2. Recent MapPoints Culling
// Triangulate new MapPoints
CreateNewMapPoints(); //corresponds to 3. New Points Creation
if(!CheckNewKeyFrames())
{
// Find more matches in neighbor keyframes and fuse point duplications
SearchInNeighbors();
}
mbAbortBA = false;
if(!CheckNewKeyFrames() && !stopRequested())
{
// Local BA
if(mpMap->KeyFramesInMap()>2)
//Run local BA over the keyframes connected to the current keyframe and the MapPoints they observe
Optimizer::LocalBundleAdjustment(mpCurrentKeyFrame,&mbAbortBA, mpMap); //corresponds to 4. Local BA
// Check redundant local Keyframes
KeyFrameCulling(); //corresponds to 5. Local KeyFrames Culling
}
//Insert the keyframe into the LoopClosing thread's keyframe queue; LoopClosing takes it from there
mpLoopCloser->InsertKeyFrame(mpCurrentKeyFrame);
}
else if(Stop())
{
// Safe area to stop
while(isStopped() && !CheckFinish())
{
usleep(3000);
}
if(CheckFinish())
break;
}
ResetIfRequested();
// Tracking will see that Local Mapping is busy
//Note: the English comment above, taken from the original source, is wrong. At this point a keyframe has just been fully processed, so Tracking should now see LocalMapping as free and can insert the next keyframe
SetAcceptKeyFrames(true);
if(CheckFinish())
break;
usleep(3000);
}
SetFinish();
}
As you can see, the code in Run() maps almost one-to-one onto the LocalMapping thread in the framework diagram. Before looking at each part, it helps to see how Tracking actually hands keyframes over: the hand-off is a small mutex-protected queue, and the two helpers below show it, essentially as they appear in LocalMapping.cc (lightly abridged):
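//Tracking pushes a keyframe into LocalMapping's queue and aborts any running local BA
void LocalMapping::InsertKeyFrame(KeyFrame *pKF)
{
unique_lock<mutex> lock(mMutexNewKFs);
mlNewKeyFrames.push_back(pKF);
mbAbortBA=true; // a new keyframe takes priority over finishing the current local BA
}
//Run() polls this to decide whether there is work in the queue
bool LocalMapping::CheckNewKeyFrames()
{
unique_lock<mutex> lock(mMutexNewKFs);
return(!mlNewKeyFrames.empty());
}
With that in mind, let's look at each part in detail: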
1) Inserting keyframes into the map (KeyFrame Insertion)
When a new keyframe reaches the LocalMapping thread, it has to be inserted into the map; with all keyframes linked together, we can clearly see the camera's trajectory. The insertion into the map is done in the ProcessNewKeyFrame function.
/**
* Process a newly inserted keyframe:
* 1. Update the association between each MapPoint and the keyframe;
* 2. Update the connections between the current keyframe and the other keyframes.
*/
void LocalMapping::ProcessNewKeyFrame()
{
{
unique_lock<mutex> lock(mMutexNewKFs);
mpCurrentKeyFrame = mlNewKeyFrames.front();
mlNewKeyFrames.pop_front();
}
// Compute Bags of Words structures
mpCurrentKeyFrame->ComputeBoW();
// Associate MapPoints to the new keyframe and update normal and descriptor
const vector<MapPoint*> vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
for(size_t i=0; i<vpMapPointMatches.size(); i++)
{
MapPoint* pMP = vpMapPointMatches[i];
if(pMP)
{
if(!pMP->isBad())
{
//Check whether mpCurrentKeyFrame is already in pMP's list of keyframes that observe this MapPoint
if(!pMP->IsInKeyFrame(mpCurrentKeyFrame))
{
pMP->AddObservation(mpCurrentKeyFrame, i);
pMP->UpdateNormalAndDepth();
pMP->ComputeDistinctiveDescriptors();
}
else // this can only happen for new stereo points inserted by the Tracking
{
mlpRecentAddedMapPoints.push_back(pMP);
}
}
}
}
// Update links in the Covisibility Graph
mpCurrentKeyFrame->UpdateConnections();
// Insert Keyframe in Map
mpMap->AddKeyFrame(mpCurrentKeyFrame);
}
As you can see, this function updates the MapPoint information of the current keyframe, updates the keyframe's connection relations, and then inserts the current keyframe into the map.
The functions that update the MapPoint information are the following:
(1) The AddObservation function
/**
* Add a keyframe to the MapPoint's mObservations.
* mObservations[pKF]=idx means that keyframe pKF observes this MapPoint as its keypoint with index idx.
*/
void MapPoint::AddObservation(KeyFrame* pKF, size_t idx)
{
unique_lock<mutex> lock(mMutexFeatures);
if(mObservations.count(pKF))
return;
mObservations[pKF]=idx;
//For stereo, an observation has both a left and a right image measurement and therefore counts twice:
if(pKF->mvuRight[idx]>=0)
nObs+=2;
else
nObs++;
}
(2) The UpdateNormalAndDepth function
/**
* Update the MapPoint's mean viewing direction (normal) and its scale-invariance depth range.
*/
void MapPoint::UpdateNormalAndDepth()
{
map<KeyFrame*,size_t> observations;
KeyFrame* pRefKF;
cv::Mat Pos;
{
unique_lock<mutex> lock1(mMutexFeatures);
unique_lock<mutex> lock2(mMutexPos);
if(mbBad)
return;
observations=mObservations;
pRefKF=mpRefKF;
Pos = mWorldPos.clone();
}
if(observations.empty())
return;
cv::Mat normal = cv::Mat::zeros(3,1,CV_32F);
int n=0;
//Iterate over the keyframes that observe this MapPoint; n counts them
for(map<KeyFrame*,size_t>::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++)
{
KeyFrame* pKF = mit->first;
//Get the camera center
cv::Mat Owi = pKF->GetCameraCenter();
//Vector from the camera center to the MapPoint's world position: the viewing ray
cv::Mat normali = mWorldPos - Owi;
//cv::norm(normali) is the vector's norm; each ray is normalized before being accumulated
normal = normal + normali/cv::norm(normali);
n++;
}
//PC is the vector from the reference keyframe's camera center to the point's world position
cv::Mat PC = Pos - pRefKF->GetCameraCenter();
const float dist = cv::norm(PC);
const int level = pRefKF->mvKeysUn[observations[pRefKF]].octave;
const float levelScaleFactor = pRefKF->mvScaleFactors[level];
const int nLevels = pRefKF->mnScaleLevels;
{
unique_lock<mutex> lock3(mMutexPos);
mfMaxDistance = dist*levelScaleFactor;
mfMinDistance = mfMaxDistance/pRefKF->mvScaleFactors[nLevels-1];
mNormalVector = normal/n;
}
}
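To make the scale behavior explicit: let $s$ be the per-level scale factor of the ORB pyramid (1.2 by default), $l$ the pyramid level at which the reference keyframe observed the point, $n$ the number of pyramid levels, $d$ the distance from the reference camera center to the point, and $\mathbf{O}_i$ the centers of the $N$ observing keyframes. The final block then computes

$d_{\max} = d \cdot s^{\,l}, \qquad d_{\min} = d_{\max} / s^{\,n-1}, \qquad \mathbf{n} = \frac{1}{N} \sum_{i=1}^{N} \frac{\mathbf{p} - \mathbf{O}_i}{\lVert \mathbf{p} - \mathbf{O}_i \rVert}$

that is, the distance range over which the point should remain recognizable in the pyramid, and the mean viewing direction over all observations.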
(3) The ComputeDistinctiveDescriptors function
/**
* Compute the map point's representative descriptor. In principle, every keyframe that observes the point
* stores a descriptor for it, so the best one must be chosen among them. The selection criterion: among all
* observed descriptors, pick the one whose median distance to all the others is smallest.
*/
void MapPoint::ComputeDistinctiveDescriptors()
{
// Retrieve all observed descriptors
vector<cv::Mat> vDescriptors;
map<KeyFrame*,size_t> observations;
{
unique_lock<mutex> lock1(mMutexFeatures);
if(mbBad)
return;
observations=mObservations;
}
if(observations.empty())
return;
vDescriptors.reserve(observations.size());
//Collect in vDescriptors the descriptor of this map point from every keyframe that observes it
for(map<KeyFrame*,size_t>::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++)
{
KeyFrame* pKF = mit->first;
if(!pKF->isBad())
vDescriptors.push_back(pKF->mDescriptors.row(mit->second));
}
if(vDescriptors.empty())
return;
// Compute distances between them
const size_t N = vDescriptors.size();
float Distances[N][N];
/**
* This double loop fills the 2D array Distances[N][N] with the pairwise descriptor distances;
* for example, Distances[0][1] holds the distance between descriptor 0 and descriptor 1.
*/
for(size_t i = 0; i < N; i++)
{
Distances[i][i]=0;
for(size_t j = i+1; j < N; j++)
{
int distij = ORBmatcher::DescriptorDistance(vDescriptors[i],vDescriptors[j]);
Distances[i][j]=distij;
Distances[j][i]=distij;
}
}
// Take the descriptor with least median distance to the rest
int BestMedian = INT_MAX;
int BestIdx = 0;
//Each iteration computes descriptor i's median distance to all the others; BestMedian keeps the smallest median seen so far, so after the loop we have the descriptor with the least median distance
for(size_t i = 0; i < N; i++)
{
vector<int> vDists(Distances[i],Distances[i]+N);
sort(vDists.begin(),vDists.end());
//median is the middle element of vDists
int median = vDists[0.5*(N-1)];
if(median < BestMedian)
{
BestMedian = median;
BestIdx = i;
}
}
{
unique_lock<mutex> lock(mMutexFeatures);
//The chosen descriptor is the one whose median distance to all the other descriptors is smallest
mDescriptor = vDescriptors[BestIdx].clone();
}
}
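A tiny worked example of the selection, with three descriptors and hypothetical Hamming distances d(0,1)=20, d(0,2)=60, d(1,2)=10: the sorted rows are [0,20,60], [0,10,20], and [0,10,60], and with N=3 the median index is 0.5*(N-1)=1, giving medians 20, 10, and 10. Descriptor 1 wins (the strict < keeps the first of the tied candidates), the descriptor sitting in the middle of the cluster; descriptor 2's large distance to descriptor 0 cannot drag the choice away, which is exactly why the median is used instead of the mean.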
The function that updates the keyframe connection relations is UpdateConnections(); its code is as follows:
/**
* Update the connections between the current keyframe and the other keyframes.
*/
void KeyFrame::UpdateConnections()
{
map<KeyFrame*,int> KFcounter;
vector<MapPoint*> vpMP;
{
unique_lock<mutex> lockMPs(mMutexFeatures);
vpMP = mvpMapPoints;
}
//For all map points in keyframe check in which other keyframes are they seen
//Increase counter for those keyframes
//Iterate over the MapPoints observed by the current keyframe and inspect each point's list of observing keyframes
for(vector<MapPoint*>::iterator vit=vpMP.begin(), vend=vpMP.end(); vit!=vend; vit++)
{
MapPoint* pMP = *vit;
if(!pMP)
continue;
if(pMP->isBad())
continue;
//observations holds the keyframes that can observe pMP
map<KeyFrame*,size_t> observations = pMP->GetObservations();
//Iterate over all the keyframes that observe pMP
for(map<KeyFrame*,size_t>::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++)
{
//Skip the entry if it is the current keyframe itself
if(mit->first->mnId==mnId)
continue;
//KFcounter gets one entry per keyframe (other than the current one) that observes this mapPoint
//The ++ accumulates the count; in the end, second is the number of MapPoints that keyframe co-observes with the current frame
KFcounter[mit->first]++;
}
}
/**
* KFcounter now maps each keyframe to the number of MapPoints it co-observes with the current keyframe.
*/
// This should not happen
if(KFcounter.empty())
return;
//If the counter is greater than threshold add connection
//In case no keyframe counter is over threshold add the one with maximum counter
int nmax=0;
KeyFrame* pKFmax=NULL;
int th = 15;
vector<pair<int,KeyFrame*> > vPairs;
vPairs.reserve(KFcounter.size());
//Iterate over the counts accumulated in KFcounter
for(map<KeyFrame*,int>::iterator mit=KFcounter.begin(), mend=KFcounter.end(); mit!=mend; mit++)
{
if(mit->second > nmax)
{
nmax=mit->second;//second is the number of shared MapPoints
pKFmax=mit->first;//first is the keyframe
}
//If mit->first shares at least th (15) MapPoints with the current keyframe, update the connection between the two
if(mit->second >= th)
{
//vPairs stores pairs of <number of shared MapPoints, keyframe>
vPairs.push_back(make_pair(mit->second,mit->first));
(mit->first)->AddConnection(this,mit->second);
}
}
if(vPairs.empty())
{
vPairs.push_back(make_pair(nmax,pKFmax));
pKFmax->AddConnection(this,nmax);
}
//Sort by weight (the number of shared MapPoints)
sort(vPairs.begin(),vPairs.end());
list<KeyFrame*> lKFs;
list<int> lWs;
for(size_t i=0; i<vPairs.size();i++)
{
lKFs.push_front(vPairs[i].second);
lWs.push_front(vPairs[i].first);
}
{
unique_lock<mutex> lockCon(mMutexConnections);
// mspConnectedKeyFrames = spConnectedKeyFrames;
//mConnectedKeyFrameWeights maps each keyframe to the number of MapPoints it co-observes with the current frame
mConnectedKeyFrameWeights = KFcounter;
//Keyframes that co-observe MapPoints with the current keyframe, ordered by weight
mvpOrderedConnectedKeyFrames = vector<KeyFrame*>(lKFs.begin(),lKFs.end());
//The corresponding numbers of co-observed MapPoints
mvOrderedWeights = vector<int>(lWs.begin(), lWs.end());
//mbFirstConnection is set to true when the KeyFrame is constructed
if(mbFirstConnection && mnId!=0)
{
mpParent = mvpOrderedConnectedKeyFrames.front();
mpParent->AddChild(this);
mbFirstConnection = false;
}
}
}
Once the MapPoint and keyframe relations have been updated, the following line inserts the keyframe into the map.
// Insert Keyframe in Map
mpMap->AddKeyFrame(mpCurrentKeyFrame);
The AddKeyFrame code is shown below. The Map keeps a container of keyframes; here the current keyframe is inserted into that container, and the maximum keyframe id is updated.
//Add a keyframe to the map
void Map::AddKeyFrame(KeyFrame *pKF)
{
unique_lock<mutex> lock(mMutexMap);
mspKeyFrames.insert(pKF);
if(pKF->mnId > mnMaxKFid)
mnMaxKFid=pKF->mnId;
}
2) Culling MapPoints that do not meet requirements (Recent MapPoints Culling)
/**
* Cull recently added MapPoints that do not meet the quality requirements.
*/
void LocalMapping::MapPointCulling()
{
// Check Recent Added MapPoints
list<MapPoint*>::iterator lit = mlpRecentAddedMapPoints.begin();
const unsigned long int nCurrentKFid = mpCurrentKeyFrame->mnId;
int nThObs;
if(mbMonocular)
nThObs = 2;
else
nThObs = 3;
const int cnThObs = nThObs;
while(lit!=mlpRecentAddedMapPoints.end())
{
MapPoint* pMP = *lit;
if(pMP->isBad())//the map point has already been flagged as bad
{
lit = mlpRecentAddedMapPoints.erase(lit);
}
else if(pMP->GetFoundRatio()<0.25f )//the map point's found ratio is too low
{
pMP->SetBadFlag();
lit = mlpRecentAddedMapPoints.erase(lit);
}
//The point was first observed several keyframes ago (compare mnFirstKFid with the current keyframe id) yet is still observed by too few keyframes
else if(((int)nCurrentKFid-(int)pMP->mnFirstKFid)>=2 && pMP->Observations()<=cnThObs)
{
pMP->SetBadFlag();
lit = mlpRecentAddedMapPoints.erase(lit);
}
else if(((int)nCurrentKFid-(int)pMP->mnFirstKFid)>=3)
lit = mlpRecentAddedMapPoints.erase(lit);
else
lit++;
}
}
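Putting the four branches together with a hypothetical timeline: a point first observed in keyframe 10 is deleted if it is ever flagged bad or its found ratio drops below 0.25; from keyframe 12 onwards (2 keyframes after mnFirstKFid) it is also deleted unless more than cnThObs keyframes observe it (more than 2 for monocular, more than 3 for stereo/RGB-D); and once keyframe 13 arrives (3 keyframes later) the point has survived its probation period and is merely removed from the recent-points list, not from the map.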
The found-ratio code is as follows:
float MapPoint::GetFoundRatio()
{
unique_lock<mutex> lock(mMutexFeatures);
return static_cast<float>(mnFound)/mnVisible;
}
void MapPoint::IncreaseVisible(int n)
{
unique_lock<mutex> lock(mMutexFeatures);
mnVisible+=n;
}
void MapPoint::IncreaseFound(int n)
{
unique_lock<mutex> lock(mMutexFeatures);
mnFound+=n;
}
From GetFoundRatio we can see that the found ratio is mnFound / mnVisible, where mnFound is incremented in IncreaseFound and mnVisible in IncreaseVisible. It is worth checking where these two functions are called; a rough sketch of the call sites in the Tracking thread follows (paraphrased from the ORB-SLAM2 sources, so treat the exact conditions as an assumption):
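//In Tracking::SearchLocalPoints, every local MapPoint inside the current frame's frustum counts as "visible", whether or not it ends up matched:
if(mCurrentFrame.isInFrustum(pMP,0.5))
pMP->IncreaseVisible();
//In Tracking::TrackLocalMap, points that remain inliers after the pose optimization count as "found":
pMP->IncreaseFound();
So the found ratio measures how often a point is actually matched when it should have been visible; once it drops below 0.25, MapPointCulling deems the point unreliable.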
3) Creating MapPoints (New Points Creation)
/**
* Triangulate new MapPoints from the keyframes that accumulated a high degree of covisibility during motion.
*/
void LocalMapping::CreateNewMapPoints()
{
// Retrieve neighbor keyframes in covisibility graph
int nn = 10;
//For a monocular camera, take the 20 best covisible keyframes
if(mbMonocular)
nn=20;
const vector<KeyFrame*> vpNeighKFs = mpCurrentKeyFrame->GetBestCovisibilityKeyFrames(nn);
ORBmatcher matcher(0.6,false);
cv::Mat Rcw1 = mpCurrentKeyFrame->GetRotation();
cv::Mat Rwc1 = Rcw1.t();
cv::Mat tcw1 = mpCurrentKeyFrame->GetTranslation();
//Build the 3x4 pose matrix [R|t]
cv::Mat Tcw1(3,4,CV_32F);
Rcw1.copyTo(Tcw1.colRange(0,3));
tcw1.copyTo(Tcw1.col(3));
cv::Mat Ow1 = mpCurrentKeyFrame->GetCameraCenter();
const float &fx1 = mpCurrentKeyFrame->fx;
const float &fy1 = mpCurrentKeyFrame->fy;
const float &cx1 = mpCurrentKeyFrame->cx;
const float &cy1 = mpCurrentKeyFrame->cy;
const float &invfx1 = mpCurrentKeyFrame->invfx;
const float &invfy1 = mpCurrentKeyFrame->invfy;
const float ratioFactor = 1.5f*mpCurrentKeyFrame->mfScaleFactor;
int nnew=0;
// Search matches with epipolar restriction and triangulate
//Iterate over the neighboring keyframes
for(size_t i=0; i<vpNeighKFs.size(); i++)
{
if(i>0 && CheckNewKeyFrames())
return;
KeyFrame* pKF2 = vpNeighKFs[i];
// Check first that baseline is not too short
cv::Mat Ow2 = pKF2->GetCameraCenter();
//Baseline vector between the two camera centers
cv::Mat vBaseline = Ow2-Ow1;
//Length of the baseline
const float baseline = cv::norm(vBaseline);
if(!mbMonocular)
{
//Stereo / RGB-D case
if(baseline < pKF2->mb)
continue;// for a stereo camera, skip this pair if the distance between the keyframes is shorter than the stereo baseline
}
else
{
//Monocular case
const float medianDepthKF2 = pKF2->ComputeSceneMedianDepth(2);
//Ratio of the baseline to the median scene depth
const float ratioBaselineDepth = baseline/medianDepthKF2;
//If the scene is very far relative to the baseline (the ratio is very small), skip this neighbor: triangulation would be too inaccurate
if(ratioBaselineDepth < 0.01)
continue;
}
// Compute Fundamental Matrix
cv::Mat F12 = ComputeF12(mpCurrentKeyFrame,pKF2);
// Search matches that fullfil epipolar constraint
// The epipolar constraint restricts the search region during matching; features that satisfy it are matched, and vMatchedIndices holds the resulting matches
vector<pair<size_t,size_t> > vMatchedIndices;
matcher.SearchForTriangulation(mpCurrentKeyFrame,pKF2,F12,vMatchedIndices,false);
cv::Mat Rcw2 = pKF2->GetRotation();
cv::Mat Rwc2 = Rcw2.t();
cv::Mat tcw2 = pKF2->GetTranslation();
cv::Mat Tcw2(3,4,CV_32F);
Rcw2.copyTo(Tcw2.colRange(0,3));
tcw2.copyTo(Tcw2.col(3));
const float &fx2 = pKF2->fx;
const float &fy2 = pKF2->fy;
const float &cx2 = pKF2->cx;
const float &cy2 = pKF2->cy;
const float &invfx2 = pKF2->invfx;
const float &invfy2 = pKF2->invfy;
// Triangulate each match: generate a 3D point from every matched pair of 2D features
const int nmatches = vMatchedIndices.size();
for(int ikp=0; ikp<nmatches; ikp++)
{
// Index of this match in the current keyframe
const int &idx1 = vMatchedIndices[ikp].first;
// Index of this match in the neighboring keyframe
const int &idx2 = vMatchedIndices[ikp].second;
const cv::KeyPoint &kp1 = mpCurrentKeyFrame->mvKeysUn[idx1];
const float kp1_ur=mpCurrentKeyFrame->mvuRight[idx1];
bool bStereo1 = kp1_ur>=0;
const cv::KeyPoint &kp2 = pKF2->mvKeysUn[idx2];
const float kp2_ur = pKF2->mvuRight[idx2];
bool bStereo2 = kp2_ur>=0;
// Check parallax between rays
/**
* kp1.pt.x is the keypoint's x pixel coordinate, cx1 the principal point, and invfx1 = 1/fx,
* so (kp1.pt.x-cx1)*invfx1 is the keypoint's x coordinate on the normalized image plane,
* and (kp1.pt.y-cy1)*invfy1 its y coordinate on that plane.
* xn1 is therefore kp1's coordinate vector on the normalized plane.
*/
cv::Mat xn1 = (cv::Mat_<float>(3,1) << (kp1.pt.x-cx1)*invfx1, (kp1.pt.y-cy1)*invfy1, 1.0);
cv::Mat xn2 = (cv::Mat_<float>(3,1) << (kp2.pt.x-cx2)*invfx2, (kp2.pt.y-cy2)*invfy2, 1.0);
//ray1 is kp1's viewing ray rotated into the world frame (camera ---> world, rotation only)
cv::Mat ray1 = Rwc1*xn1;
//ray2 is kp2's viewing ray in the world frame
cv::Mat ray2 = Rwc2*xn2;
//From the dot product a · b = |a|*|b|*cos<a,b> we get cos<a,b> = (a · b)/(|a|*|b|)
const float cosParallaxRays = ray1.dot(ray2)/(cv::norm(ray1)*cv::norm(ray2));
//Adding 1 to cosParallaxRays initializes cosParallaxStereo to a value larger than any valid cosine, so it only takes effect if overwritten below
float cosParallaxStereo = cosParallaxRays + 1;
float cosParallaxStereo1 = cosParallaxStereo;
float cosParallaxStereo2 = cosParallaxStereo;
//For stereo, obtain the parallax angle from the stereo baseline and the depth
if(bStereo1)
cosParallaxStereo1 = cos(2*atan2(mpCurrentKeyFrame->mb/2,mpCurrentKeyFrame->mvDepth[idx1]));
else if(bStereo2)
cosParallaxStereo2 = cos(2*atan2(pKF2->mb/2,pKF2->mvDepth[idx2]));
//Keep the larger of the two stereo parallax angles (the smaller cosine)
cosParallaxStereo = min(cosParallaxStereo1,cosParallaxStereo2);
//Triangulate to recover the 3D point
cv::Mat x3D;
if(cosParallaxRays < cosParallaxStereo && cosParallaxRays > 0 && (bStereo1 || bStereo2 || cosParallaxRays<0.9998))
{
// Linear Triangulation Method
cv::Mat A(4,4,CV_32F);
A.row(0) = xn1.at<float>(0)*Tcw1.row(2)-Tcw1.row(0);
A.row(1) = xn1.at<float>(1)*Tcw1.row(2)-Tcw1.row(1);
A.row(2) = xn2.at<float>(0)*Tcw2.row(2)-Tcw2.row(0);
A.row(3) = xn2.at<float>(1)*Tcw2.row(2)-Tcw2.row(1);
cv::Mat w,u,vt;
cv::SVD::compute(A,w,u,vt,cv::SVD::MODIFY_A| cv::SVD::FULL_UV);
x3D = vt.row(3).t();
if(x3D.at<float>(3)==0)
continue;
// Euclidean coordinates: dehomogenize
x3D = x3D.rowRange(0,3)/x3D.at<float>(3);
}
else if(bStereo1 && cosParallaxStereo1<cosParallaxStereo2)
{
x3D = mpCurrentKeyFrame->UnprojectStereo(idx1);
}
else if(bStereo2 && cosParallaxStereo2<cosParallaxStereo1)
{
x3D = pKF2->UnprojectStereo(idx2);
}
else
continue; //No stereo and very low parallax
cv::Mat x3Dt = x3D.t();
//Check triangulation in front of cameras
//Check that the recovered 3D point has positive depth in both cameras
float z1 = Rcw1.row(2).dot(x3Dt)+tcw1.at<float>(2);
if(z1<=0)
continue;
float z2 = Rcw2.row(2).dot(x3Dt)+tcw2.at<float>(2);
if(z2<=0)
continue;
//Check reprojection error in first keyframe
const float &sigmaSquare1 = mpCurrentKeyFrame->mvLevelSigma2[kp1.octave];
const float x1 = Rcw1.row(0).dot(x3Dt)+tcw1.at<float>(0);
const float y1 = Rcw1.row(1).dot(x3Dt)+tcw1.at<float>(1);
const float invz1 = 1.0/z1;
if(!bStereo1)
{
//Reprojected pixel coordinates
float u1 = fx1*x1*invz1+cx1;
float v1 = fy1*y1*invz1+cy1;
//Reprojection error
float errX1 = u1 - kp1.pt.x;
float errY1 = v1 - kp1.pt.y;
//Gate on the squared reprojection error: 5.991 is the 95% chi-square threshold for 2 degrees of freedom
if((errX1*errX1+errY1*errY1)>5.991*sigmaSquare1)
continue;
}
else
{
float u1 = fx1*x1*invz1+cx1;
float u1_r = u1 - mpCurrentKeyFrame->mbf*invz1;
float v1 = fy1*y1*invz1+cy1;
float errX1 = u1 - kp1.pt.x;
float errY1 = v1 - kp1.pt.y;
float errX1_r = u1_r - kp1_ur;
if((errX1*errX1+errY1*errY1+errX1_r*errX1_r)>7.8*sigmaSquare1)
continue;
}
//Check reprojection error in second keyframe
const float sigmaSquare2 = pKF2->mvLevelSigma2[kp2.octave];
const float x2 = Rcw2.row(0).dot(x3Dt)+tcw2.at<float>(0);
const float y2 = Rcw2.row(1).dot(x3Dt)+tcw2.at<float>(1);
const float invz2 = 1.0/z2;
if(!bStereo2)
{
float u2 = fx2*x2*invz2+cx2;
float v2 = fy2*y2*invz2+cy2;
float errX2 = u2 - kp2.pt.x;
float errY2 = v2 - kp2.pt.y;
if((errX2*errX2+errY2*errY2)>5.991*sigmaSquare2)
continue;
}
else
{
float u2 = fx2*x2*invz2+cx2;
float u2_r = u2 - mpCurrentKeyFrame->mbf*invz2;
float v2 = fy2*y2*invz2+cy2;
float errX2 = u2 - kp2.pt.x;
float errY2 = v2 - kp2.pt.y;
float errX2_r = u2_r - kp2_ur;
if((errX2*errX2+errY2*errY2+errX2_r*errX2_r)>7.8*sigmaSquare2)
continue;
}
//Check scale consistency
cv::Mat normal1 = x3D-Ow1;
float dist1 = cv::norm(normal1);
cv::Mat normal2 = x3D-Ow2;
float dist2 = cv::norm(normal2);
//If the point coincides with either camera center (zero distance), it fails the check
if(dist1==0 || dist2==0)
continue;
const float ratioDist = dist2/dist1;
const float ratioOctave = mpCurrentKeyFrame->mvScaleFactors[kp1.octave]/pKF2->mvScaleFactors[kp2.octave];
/*if(fabs(ratioDist-ratioOctave)>ratioFactor)
continue;*/
if(ratioDist*ratioFactor < ratioOctave || ratioDist > ratioOctave*ratioFactor)
continue;
// Triangulation is successful: create the map point
MapPoint* pMP = new MapPoint(x3D,mpCurrentKeyFrame,mpMap);
pMP->AddObservation(mpCurrentKeyFrame,idx1);
pMP->AddObservation(pKF2,idx2);
//Register the map point with both keyframes
mpCurrentKeyFrame->AddMapPoint(pMP,idx1);
pKF2->AddMapPoint(pMP,idx2);
//Compute the map point's representative descriptor
pMP->ComputeDistinctiveDescriptors();
//Update the map point's mean viewing direction and depth range
pMP->UpdateNormalAndDepth();
//Add the map point to the global map
mpMap->AddMapPoint(pMP);
mlpRecentAddedMapPoints.push_back(pMP);
nnew++;
}
}
}
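The linear triangulation block above is the standard DLT construction. Each view gives the projective relation $x \sim T_{cw} X$, where $x = (u, v, 1)^T$ is the keypoint on the normalized plane (xn1/xn2 in the code) and $T_{cw}$ the 3x4 pose matrix; the cross product $x \times (T_{cw} X) = 0$ yields two independent linear equations per view:

$u\,\mathbf{p}_3^{T} X - \mathbf{p}_1^{T} X = 0, \qquad v\,\mathbf{p}_3^{T} X - \mathbf{p}_2^{T} X = 0$

where $\mathbf{p}_i^{T}$ is the $i$-th row of $T_{cw}$. Stacking the two equations from each keyframe gives the 4x4 matrix A; the homogeneous least-squares solution is the right singular vector of the smallest singular value, i.e. the last row of vt from the SVD, which is then dehomogenized (and rejected if its fourth component is 0, a point at infinity).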
The ComputeSceneMedianDepth function that computes the scene depth is as follows:
/**
* Compute the keyframe's median scene depth.
*/
float KeyFrame::ComputeSceneMedianDepth(const int q)
{
vector<MapPoint*> vpMapPoints;
cv::Mat Tcw_;
{
unique_lock<mutex> lock(mMutexFeatures);
unique_lock<mutex> lock2(mMutexPose);
vpMapPoints = mvpMapPoints;
Tcw_ = Tcw.clone();
}
vector<float> vDepths;
vDepths.reserve(N);
//Take the third row of the rotation (the z-row of Tcw)
cv::Mat Rcw2 = Tcw_.row(2).colRange(0,3);
Rcw2 = Rcw2.t();
//and the z component of the translation
float zcw = Tcw_.at<float>(2,3);
for(int i=0; i<N; i++)
{
if(mvpMapPoints[i])
{
MapPoint* pMP = mvpMapPoints[i];
//获取每个地图点的世界坐标
cv::Mat x3Dw = pMP->GetWorldPos();
//Dotting the z-row of the rotation with the point's world coordinates and adding the z translation gives the point's depth (z) in the current camera frame
float z = Rcw2.dot(x3Dw)+zcw;
vDepths.push_back(z);
}
}
//Sort the depths
sort(vDepths.begin(),vDepths.end());
//With q=2 (the value used by the caller), this returns the median depth, not the mean
return vDepths[(vDepths.size()-1)/q];
}
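A quick sanity check of the indexing: for the sorted depths {2.1, 3.0, 4.5, 7.2, 9.9} and q=2, the index is (5-1)/2 = 2, so the function returns 4.5, the true median of the five values.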
4) Local BA optimization (Local BA)
/**
* Steps of the local optimization function:
* 1. Build the list of local keyframes, i.e. the keyframes sharing the covisibility map, and store them in lLocalKeyFrames.
* 2. Find all map points seen by the local keyframes (effectively the points of the covisibility map) and store them in lLocalMapPoints.
* 3. Find the keyframes that also observe those map points but are not local keyframes, and store them in lFixedCameras.
*    (They constrain the points through their observations, but their poses are held fixed and not optimized.)
* 4. Add the local keyframes, map points, and fixed cameras to the graph as nodes.
* 5. For each point in lLocalMapPoints and every keyframe that observes it, add an edge:
*    its two endpoints are the keyframe's pose and the map point's position;
*    its measurement is the point's 2D location in that keyframe;
*    the information matrix (weight) reflects the measurement uncertainty, i.e. the error of reprojecting the 3D point into the image.
* 6. Remove the edges that fail the outlier tests.
* 7. Write the optimized map point positions and keyframe poses back.
*/
void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap)
{
// Local KeyFrames: First Breath Search from Current Keyframe
list<KeyFrame*> lLocalKeyFrames;
lLocalKeyFrames.push_back(pKF);
pKF->mnBALocalForKF = pKF->mnId;
//1. Build the list of local keyframes in lLocalKeyFrames: the keyframes that share covisibility with the current keyframe.
const vector<KeyFrame*> vNeighKFs = pKF->GetVectorCovisibleKeyFrames();
for(int i=0, iend=vNeighKFs.size(); i<iend; i++)
{
KeyFrame* pKFi = vNeighKFs[i];
pKFi->mnBALocalForKF = pKF->mnId;
if(!pKFi->isBad())
lLocalKeyFrames.push_back(pKFi);
}
// Local MapPoints seen in Local KeyFrames
//2. Build the list of local map points seen by the local keyframes, stored in lLocalMapPoints.
list<MapPoint*> lLocalMapPoints;
for(list<KeyFrame*>::iterator lit=lLocalKeyFrames.begin() , lend=lLocalKeyFrames.end(); lit!=lend; lit++)
{
vector<MapPoint*> vpMPs = (*lit)->GetMapPointMatches();
for(vector<MapPoint*>::iterator vit=vpMPs.begin(), vend=vpMPs.end(); vit!=vend; vit++)
{
MapPoint* pMP = *vit;
if(pMP)
if(!pMP->isBad())
if(pMP->mnBALocalForKF!=pKF->mnId)
{
lLocalMapPoints.push_back(pMP);
pMP->mnBALocalForKF=pKF->mnId;
}
}
}
// Fixed Keyframes. Keyframes that see Local MapPoints but that are not Local Keyframes
//3. Build the list of fixed keyframes, lFixedCameras: keyframes that observe the local map points but are not themselves local keyframes.
list<KeyFrame*> lFixedCameras;
for(list<MapPoint*>::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++)
{
map<KeyFrame*,size_t> observations = (*lit)->GetObservations();
for(map<KeyFrame*,size_t>::iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++)
{
KeyFrame* pKFi = mit->first;
if(pKFi->mnBALocalForKF!=pKF->mnId && pKFi->mnBAFixedForKF!=pKF->mnId)
{
pKFi->mnBAFixedForKF=pKF->mnId;
if(!pKFi->isBad())
lFixedCameras.push_back(pKFi);
}
}
}
// Setup optimizer
g2o::SparseOptimizer optimizer;
g2o::BlockSolver_6_3::LinearSolverType * linearSolver;
linearSolver = new g2o::LinearSolverEigen<g2o::BlockSolver_6_3::PoseMatrixType>();
g2o::BlockSolver_6_3 * solver_ptr = new g2o::BlockSolver_6_3(linearSolver);
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
optimizer.setAlgorithm(solver);
if(pbStopFlag)
optimizer.setForceStopFlag(pbStopFlag);
unsigned long maxKFid = 0;
// Set Local KeyFrame vertices
// 4. Add the local keyframe vertices to the optimizer.
for(list<KeyFrame*>::iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++)
{
KeyFrame* pKFi = *lit;
g2o::VertexSE3Expmap * vSE3 = new g2o::VertexSE3Expmap();
vSE3->setEstimate(Converter::toSE3Quat(pKFi->GetPose()));
vSE3->setId(pKFi->mnId);
vSE3->setFixed(pKFi->mnId==0);
optimizer.addVertex(vSE3);
if(pKFi->mnId>maxKFid)
maxKFid=pKFi->mnId;
}
// Set Fixed KeyFrame vertices
// 5. Add the fixed keyframe vertices to the optimizer.
for(list<KeyFrame*>::iterator lit=lFixedCameras.begin(), lend=lFixedCameras.end(); lit!=lend; lit++)
{
KeyFrame* pKFi = *lit;
g2o::VertexSE3Expmap * vSE3 = new g2o::VertexSE3Expmap();
vSE3->setEstimate(Converter::toSE3Quat(pKFi->GetPose()));
vSE3->setId(pKFi->mnId);
vSE3->setFixed(true);
optimizer.addVertex(vSE3);
if(pKFi->mnId>maxKFid)
maxKFid=pKFi->mnId;
}
// Set MapPoint vertices
// 6. Set up the MapPoint vertices
const int nExpectedSize = (lLocalKeyFrames.size()+lFixedCameras.size())*lLocalMapPoints.size();
vector<g2o::EdgeSE3ProjectXYZ*> vpEdgesMono;
vpEdgesMono.reserve(nExpectedSize);
vector<KeyFrame*> vpEdgeKFMono;
vpEdgeKFMono.reserve(nExpectedSize);
vector<MapPoint*> vpMapPointEdgeMono;
vpMapPointEdgeMono.reserve(nExpectedSize);
vector<g2o::EdgeStereoSE3ProjectXYZ*> vpEdgesStereo;
vpEdgesStereo.reserve(nExpectedSize);
vector<KeyFrame*> vpEdgeKFStereo;
vpEdgeKFStereo.reserve(nExpectedSize);
vector<MapPoint*> vpMapPointEdgeStereo;
vpMapPointEdgeStereo.reserve(nExpectedSize);
const float thHuberMono = sqrt(5.991);
const float thHuberStereo = sqrt(7.815);
// 7. Iterate over the local map points and set up the corresponding edges
for(list<MapPoint*>::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++)
{
MapPoint* pMP = *lit;
g2o::VertexSBAPointXYZ* vPoint = new g2o::VertexSBAPointXYZ();
vPoint->setEstimate(Converter::toVector3d(pMP->GetWorldPos()));
int id = pMP->mnId+maxKFid+1;
vPoint->setId(id);
vPoint->setMarginalized(true);
optimizer.addVertex(vPoint);
const map<KeyFrame*,size_t> observations = pMP->GetObservations();
//Set edges
for(map<KeyFrame*,size_t>::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++)
{
KeyFrame* pKFi = mit->first;
if(!pKFi->isBad())
{
const cv::KeyPoint &kpUn = pKFi->mvKeysUn[mit->second];
// Monocular observation
if(pKFi->mvuRight[mit->second]<0)
{
Eigen::Matrix<double,2,1> obs;
obs << kpUn.pt.x, kpUn.pt.y;
g2o::EdgeSE3ProjectXYZ* e = new g2o::EdgeSE3ProjectXYZ();
e->setVertex(0, dynamic_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(id)));
e->setVertex(1, dynamic_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(pKFi->mnId)));
e->setMeasurement(obs);
const float &invSigma2 = pKFi->mvInvLevelSigma2[kpUn.octave];
e->setInformation(Eigen::Matrix2d::Identity()*invSigma2);
g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber;
e->setRobustKernel(rk);
rk->setDelta(thHuberMono);
e->fx = pKFi->fx;
e->fy = pKFi->fy;
e->cx = pKFi->cx;
e->cy = pKFi->cy;
optimizer.addEdge(e);
vpEdgesMono.push_back(e);
vpEdgeKFMono.push_back(pKFi);
vpMapPointEdgeMono.push_back(pMP);
}
else // Stereo observation
{
Eigen::Matrix<double,3,1> obs;
const float kp_ur = pKFi->mvuRight[mit->second];
obs << kpUn.pt.x, kpUn.pt.y, kp_ur;
g2o::EdgeStereoSE3ProjectXYZ* e = new g2o::EdgeStereoSE3ProjectXYZ();
e->setVertex(0, dynamic_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(id)));
e->setVertex(1, dynamic_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(pKFi->mnId)));
e->setMeasurement(obs);
const float &invSigma2 = pKFi->mvInvLevelSigma2[kpUn.octave];
Eigen::Matrix3d Info = Eigen::Matrix3d::Identity()*invSigma2;
e->setInformation(Info);
g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber;
e->setRobustKernel(rk);
rk->setDelta(thHuberStereo);
e->fx = pKFi->fx;
e->fy = pKFi->fy;
e->cx = pKFi->cx;
e->cy = pKFi->cy;
e->bf = pKFi->mbf;
optimizer.addEdge(e);
vpEdgesStereo.push_back(e);
vpEdgeKFStereo.push_back(pKFi);
vpMapPointEdgeStereo.push_back(pMP);
}
}
}
}
if(pbStopFlag)
if(*pbStopFlag)
return;
//8. Optimize: a first round of 5 iterations.
optimizer.initializeOptimization();
optimizer.optimize(5);
bool bDoMore= true;
if(pbStopFlag)
if(*pbStopFlag)
bDoMore = false;
// 9. Check the inlier observations from the first round and exclude the outliers, then run another 10 iterations.
if(bDoMore)
{
// Check inlier observations
for(size_t i=0, iend=vpEdgesMono.size(); i<iend;i++)
{
g2o::EdgeSE3ProjectXYZ* e = vpEdgesMono[i];
MapPoint* pMP = vpMapPointEdgeMono[i];
if(pMP->isBad())
continue;
if(e->chi2()>5.991 || !e->isDepthPositive())
{
e->setLevel(1);
}
e->setRobustKernel(0);
}
for(size_t i=0, iend=vpEdgesStereo.size(); i<iend;i++)
{
g2o::EdgeStereoSE3ProjectXYZ* e = vpEdgesStereo[i];
MapPoint* pMP = vpMapPointEdgeStereo[i];
if(pMP->isBad())
continue;
if(e->chi2()>7.815 || !e->isDepthPositive())
{
e->setLevel(1);
}
e->setRobustKernel(0);
}
// Optimize again without the outliers
optimizer.initializeOptimization(0);
optimizer.optimize(10);
}
vector<pair<KeyFrame*,MapPoint*> > vToErase;
vToErase.reserve(vpEdgesMono.size()+vpEdgesStereo.size());
// Check inlier observations
//10. After the optimization, check the results once more
for(size_t i=0, iend=vpEdgesMono.size(); i<iend;i++)
{
g2o::EdgeSE3ProjectXYZ* e = vpEdgesMono[i];
MapPoint* pMP = vpMapPointEdgeMono[i];
if(pMP->isBad())
continue;
if(e->chi2()>5.991 || !e->isDepthPositive())
{
KeyFrame* pKFi = vpEdgeKFMono[i];
vToErase.push_back(make_pair(pKFi,pMP));
}
}
for(size_t i=0, iend=vpEdgesStereo.size(); i<iend;i++)
{
g2o::EdgeStereoSE3ProjectXYZ* e = vpEdgesStereo[i];
MapPoint* pMP = vpMapPointEdgeStereo[i];
if(pMP->isBad())
continue;
if(e->chi2()>7.815 || !e->isDepthPositive())
{
KeyFrame* pKFi = vpEdgeKFStereo[i];
vToErase.push_back(make_pair(pKFi,pMP));
}
}
// Get Map Mutex
unique_lock<mutex> lock(pMap->mMutexMapUpdate);
if(!vToErase.empty())
{
for(size_t i=0;i<vToErase.size();i++)
{
KeyFrame* pKFi = vToErase[i].first;
MapPoint* pMPi = vToErase[i].second;
pKFi->EraseMapPointMatch(pMPi);
pMPi->EraseObservation(pKFi);
}
}
// Recover optimized data
//11. Write back the optimized data.
//Keyframes
for(list<KeyFrame*>::iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++)
{
KeyFrame* pKF = *lit;
g2o::VertexSE3Expmap* vSE3 = static_cast<g2o::VertexSE3Expmap*>(optimizer.vertex(pKF->mnId));
g2o::SE3Quat SE3quat = vSE3->estimate();
pKF->SetPose(Converter::toCvMat(SE3quat));
}
//Points
for(list<MapPoint*>::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++)
{
MapPoint* pMP = *lit;
g2o::VertexSBAPointXYZ* vPoint = static_cast<g2o::VertexSBAPointXYZ*>(optimizer.vertex(pMP->mnId+maxKFid+1));
pMP->SetWorldPos(Converter::toCvMat(vPoint->estimate()));
pMP->UpdateNormalAndDepth();
}
}
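A note on the constants used throughout this function: 5.991 and 7.815 are the 95% quantiles of the chi-square distribution with 2 and 3 degrees of freedom, matching the dimension of a monocular measurement $(u, v)$ and a stereo measurement $(u, v, u_R)$ respectively:

$\chi^2_{0.95,\,2} = 5.991, \qquad \chi^2_{0.95,\,3} = 7.815$

The Huber kernel deltas are their square roots (thHuberMono = sqrt(5.991), thHuberStereo = sqrt(7.815)), so the robust kernel starts to down-weight an edge at exactly the error where the chi-square test would flag it as an outlier.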
5) Culling keyframes (Local KeyFrames Culling)
/**
* A keyframe is considered redundant when more than 90% of its MapPoints can be observed
* by at least 3 other covisible keyframes (at the same or finer scale).
*/
void LocalMapping::KeyFrameCulling()
{
// Check redundant keyframes (only local keyframes)
// A keyframe is considered redundant if the 90% of the MapPoints it sees, are seen
// in at least other 3 keyframes (in the same or finer scale)
// We only consider close stereo points
// Check for redundant keyframes (local keyframes only): if 90% of the MapPoints a keyframe sees are seen by at least 3 other keyframes at the same or finer scale (scale = pyramid level), the keyframe is redundant.
vector<KeyFrame*> vpLocalKeyFrames = mpCurrentKeyFrame->GetVectorCovisibleKeyFrames();
for(vector<KeyFrame*>::iterator vit=vpLocalKeyFrames.begin(), vend=vpLocalKeyFrames.end(); vit!=vend; vit++)
{
KeyFrame* pKF = *vit;
if(pKF->mnId==0)
continue;
const vector<MapPoint*> vpMapPoints = pKF->GetMapPointMatches();
int nObs = 3;
const int thObs=nObs;
int nRedundantObservations=0;
int nMPs=0;
for(size_t i=0, iend=vpMapPoints.size(); i<iend; i++)
{
MapPoint* pMP = vpMapPoints[i];
if(pMP)
{
if(!pMP->isBad())
{
if(!mbMonocular)
{
if(pKF->mvDepth[i]>pKF->mThDepth || pKF->mvDepth[i]<0)
continue;
}
//Count the valid MapPoints
nMPs++;
//Is pMP observed by more than thObs (3) keyframes?
if(pMP->Observations()>thObs)
{
const int &scaleLevel = pKF->mvKeysUn[i].octave;
const map<KeyFrame*, size_t> observations = pMP->GetObservations();
int nObs=0;
for(map<KeyFrame*, size_t>::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++)
{
KeyFrame* pKFi = mit->first;
if(pKFi==pKF)
continue;
// Pyramid level at which the keypoint was extracted in pKFi
const int &scaleLeveli = pKFi->mvKeysUn[mit->second].octave;
//Require scaleLeveli <= scaleLevel+1: the other keyframe observes the point at the same or a finer scale.
//Why compare pyramid levels? A lower level means higher image resolution, so that observation is at least as accurate as this keyframe's; only then does this keyframe's observation count as redundant.
if(scaleLeveli <= scaleLevel+1)
{
//Count the qualifying observing keyframes
nObs++;
if(nObs>=thObs)
break;
}
}
//Check whether at least thObs (3) qualifying keyframes observe the point
if(nObs>=thObs)
{
nRedundantObservations++;
}
}
}
}
}
if(nRedundantObservations>0.9*nMPs)
//Flag the keyframe as bad, i.e. consider it a redundant keyframe
pKF->SetBadFlag();
}
}
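A concrete example of the 90% criterion: suppose pKF has nMPs = 200 valid (for stereo: close) map points. If at least 181 of them (more than 0.9 * 200) are each observed by 3 or more other keyframes at the same or finer scale, pKF is flagged as bad and culled; points observed well only by pKF count against the 90% and keep the keyframe alive.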
6) Fusing redundant MapPoints
In the Run function you can see a call to SearchInNeighbors; this function fuses redundant MapPoints in the map.
So what is the MapPoint redundancy problem? Briefly: matching feature points between two keyframes lets us recover many corresponding MapPoints in the map, so every keyframe ends up with its own list of associated MapPoints. As the map accumulates keyframes, it accumulates MapPoints, and duplicates appear. When a new keyframe is inserted, we can therefore traverse the map's MapPoints: if an existing MapPoint projects onto a feature in the new keyframe and its descriptor still matches that feature's descriptor, the two observations describe the same point, and one MapPoint replaces the other (as the Fuse code below shows, the one with more observations is kept).
void LocalMapping::SearchInNeighbors()
{
// Retrieve neighbor keyframes
int nn = 10;
if(mbMonocular)
nn=20;
// Get the nn best covisible keyframes of the current keyframe
const vector<KeyFrame*> vpNeighKFs = mpCurrentKeyFrame->GetBestCovisibilityKeyFrames(nn);
vector<KeyFrame*> vpTargetKFs;
// Collect the covisible keyframes, plus their own covisible keyframes (second neighbors), into vpTargetKFs
for(vector<KeyFrame*>::const_iterator vit=vpNeighKFs.begin(), vend=vpNeighKFs.end(); vit!=vend; vit++)
{
KeyFrame* pKFi = *vit;
if(pKFi->isBad() || pKFi->mnFuseTargetForKF == mpCurrentKeyFrame->mnId)
continue;
vpTargetKFs.push_back(pKFi);
pKFi->mnFuseTargetForKF = mpCurrentKeyFrame->mnId;
// Extend to some second neighbors
const vector<KeyFrame*> vpSecondNeighKFs = pKFi->GetBestCovisibilityKeyFrames(5);
for(vector<KeyFrame*>::const_iterator vit2=vpSecondNeighKFs.begin(), vend2=vpSecondNeighKFs.end(); vit2!=vend2; vit2++)
{
KeyFrame* pKFi2 = *vit2;
if(pKFi2->isBad() || pKFi2->mnFuseTargetForKF==mpCurrentKeyFrame->mnId || pKFi2->mnId==mpCurrentKeyFrame->mnId)
continue;
vpTargetKFs.push_back(pKFi2);
}
}
// Search matches by projection from current KF in target KFs
//Iterate over the keyframe list built above and project the current keyframe's MapPoints into each of them, fusing duplicates
ORBmatcher matcher;
vector<MapPoint*> vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
for(vector<KeyFrame*>::iterator vit=vpTargetKFs.begin(), vend=vpTargetKFs.end(); vit!=vend; vit++)
{
KeyFrame* pKFi = *vit;
//MapPoint fusion
matcher.Fuse(pKFi,vpMapPointMatches);
}
// Search matches by projection from target KFs in current KF
//Now the reverse direction: collect the MapPoints of all the target keyframes and fuse them into the current keyframe
vector<MapPoint*> vpFuseCandidates;
vpFuseCandidates.reserve(vpTargetKFs.size()*vpMapPointMatches.size());
for(vector<KeyFrame*>::iterator vitKF=vpTargetKFs.begin(), vendKF=vpTargetKFs.end(); vitKF!=vendKF; vitKF++)
{
KeyFrame* pKFi = *vitKF;
vector<MapPoint*> vpMapPointsKFi = pKFi->GetMapPointMatches();
for(vector<MapPoint*>::iterator vitMP=vpMapPointsKFi.begin(), vendMP=vpMapPointsKFi.end(); vitMP!=vendMP; vitMP++)
{
MapPoint* pMP = *vitMP;
if(!pMP)
continue;
if(pMP->isBad() || pMP->mnFuseCandidateForKF == mpCurrentKeyFrame->mnId)
continue;
pMP->mnFuseCandidateForKF = mpCurrentKeyFrame->mnId;
vpFuseCandidates.push_back(pMP);
}
}
matcher.Fuse(mpCurrentKeyFrame,vpFuseCandidates);
// Update points
vpMapPointMatches = mpCurrentKeyFrame->GetMapPointMatches();
for(size_t i=0, iend=vpMapPointMatches.size(); i<iend; i++)
{
MapPoint* pMP=vpMapPointMatches[i];
if(pMP)
{
if(!pMP->isBad())
{
pMP->ComputeDistinctiveDescriptors();
pMP->UpdateNormalAndDepth();
}
}
}
// Update connections in covisibility graph
mpCurrentKeyFrame->UpdateConnections();
}
The fusion function Fuse is as follows:
int ORBmatcher::Fuse(KeyFrame *pKF, const vector<MapPoint *> &vpMapPoints, const float th)
{
cv::Mat Rcw = pKF->GetRotation();
cv::Mat tcw = pKF->GetTranslation();
//Camera intrinsics
const float &fx = pKF->fx;
const float &fy = pKF->fy;
const float &cx = pKF->cx;
const float &cy = pKF->cy;
const float &bf = pKF->mbf;
//Camera center
cv::Mat Ow = pKF->GetCameraCenter();
int nFused=0;
//Number of map points
const int nMPs = vpMapPoints.size();
//Iterate over the map points
for(int i=0; i<nMPs; i++)
{
MapPoint* pMP = vpMapPoints[i];
if(!pMP)
continue;
if(pMP->isBad() || pMP->IsInKeyFrame(pKF))
continue;
cv::Mat p3Dw = pMP->GetWorldPos();
//Transform the point into the camera frame
cv::Mat p3Dc = Rcw*p3Dw + tcw;
// Depth must be positive
if(p3Dc.at<float>(2) < 0.0f)
continue;
const float invz = 1/p3Dc.at<float>(2);
//Normalized image plane coordinates
const float x = p3Dc.at<float>(0)*invz;
const float y = p3Dc.at<float>(1)*invz;
//Pixel coordinates
const float u = fx*x+cx;
const float v = fy*y+cy;
// Point must be inside the image: check that the projected pixel falls within the image bounds
if(!pKF->IsInImage(u,v))
continue;
const float ur = u-bf*invz;
/**
* Depth range: the distance from the map point to its reference keyframe's camera center, multiplied by the
* pyramid scale factor of the level at which its descriptor was extracted, gives the maximum distance mfMaxDistance;
* dividing the maximum distance by the scale factor of the highest pyramid level gives the minimum distance mfMinDistance.
* Roughly speaking, nearby map points are extracted at higher pyramid levels, while distant map points are
* extracted at lower levels (lower level = higher resolution, which is needed to detect far points).
* So from the map point's information (mainly its descriptor) we can recover its pyramid level:
* const int level = pRefKF->mvKeysUn[observations[pRefKF]].octave;
* and from that predict the distance range over which the point can be observed.
*/
const float maxDistance = pMP->GetMaxDistanceInvariance();
const float minDistance = pMP->GetMinDistanceInvariance();
//Vector from the camera center to the point, in world coordinates
cv::Mat PO = p3Dw-Ow;
//Distance from the camera center to the point
const float dist3D = cv::norm(PO);
// Depth must be inside the scale pyramid of the image
//The distance must lie between the minimum and maximum distances
if(dist3D<minDistance || dist3D>maxDistance )
continue;
// Viewing angle must be less than 60 deg
cv::Mat Pn = pMP->GetNormal();
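//Since Pn is a unit vector, PO.dot(Pn) = dist3D * cos(angle); rejecting when it is below 0.5*dist3D rejects viewing angles larger than 60 degrees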
if(PO.dot(Pn)<0.5*dist3D)
continue;
//Predict the pyramid level from the distance
int nPredictedLevel = pMP->PredictScale(dist3D,pKF);
// Search in a radius
const float radius = th*pKF->mvScaleFactors[nPredictedLevel];
//Find the keypoints of pKF within radius of (u, v)
const vector<size_t> vIndices = pKF->GetFeaturesInArea(u,v,radius);
if(vIndices.empty())
continue;
// Match to the most similar keypoint in the radius
const cv::Mat dMP = pMP->GetDescriptor();
int bestDist = 256;
int bestIdx = -1;
//Compare descriptors
for(vector<size_t>::const_iterator vit=vIndices.begin(), vend=vIndices.end(); vit!=vend; vit++)
{
const size_t idx = *vit;
const cv::KeyPoint &kp = pKF->mvKeysUn[idx];
const int &kpLevel= kp.octave;
if(kpLevel<nPredictedLevel-1 || kpLevel>nPredictedLevel)
continue;
if(pKF->mvuRight[idx]>=0)
{
// Check reprojection error in stereo
const float &kpx = kp.pt.x;
const float &kpy = kp.pt.y;
const float &kpr = pKF->mvuRight[idx];
const float ex = u-kpx;
const float ey = v-kpy;
const float er = ur-kpr;
const float e2 = ex*ex+ey*ey+er*er;
if(e2*pKF->mvInvLevelSigma2[kpLevel]>7.8)
continue;
}
else
{
const float &kpx = kp.pt.x;
const float &kpy = kp.pt.y;
const float ex = u-kpx;
const float ey = v-kpy;
const float e2 = ex*ex+ey*ey;
if(e2*pKF->mvInvLevelSigma2[kpLevel]>5.99)
continue;
}
const cv::Mat &dKF = pKF->mDescriptors.row(idx);
const int dist = DescriptorDistance(dMP,dKF);
if(dist<bestDist)
{
bestDist = dist;
bestIdx = idx;
}
}
// If there is already a MapPoint replace otherwise add new measurement
//Replace the existing map point or add a new observation
if(bestDist <= TH_LOW)
{
MapPoint* pMPinKF = pKF->GetMapPoint(bestIdx);
if(pMPinKF)
{
if(!pMPinKF->isBad())
{
if(pMPinKF->Observations() > pMP->Observations())
pMP->Replace(pMPinKF);
else
pMPinKF->Replace(pMP);
}
}
else
{
pMP->AddObservation(pKF,bestIdx);
pKF->AddMapPoint(pMP,bestIdx);
}
nFused++;
}
}
return nFused;
}