What the Frame class does
Frame manages a single image frame: it extracts feature points, checks whether feature points lie within the camera's field of view, back-projects feature points into 3D space, and so on.
The most noteworthy detail of the Frame class is that each image is divided into a grid of cells and the keypoints are binned per cell; this makes later searches for features in a local image region (and thus matching) much cheaper than scanning all keypoints. (The even spatial distribution of the extracted keypoints themselves is handled separately, inside ORBextractor.)
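For example, mapping a pixel to its grid cell takes only two multiplications and two roundings, which is what makes the per-cell lookups later in this class cheap. Below is a minimal sketch of that mapping, assuming a 640x480 undistorted image (in the real class the bounds mnMinX/mnMaxX/mnMinY/mnMaxY come from ComputeImageBounds); it mirrors Frame::PosInGrid further down:
#include <cmath>
// Grid dimensions, matching FRAME_GRID_COLS / FRAME_GRID_ROWS in Frame.h.
static const int GRID_COLS = 64, GRID_ROWS = 48;
// Returns false when the keypoint rounds to a cell outside the grid,
// which can happen for undistorted keypoints near the image border.
bool posInGridSketch(float x, float y, int &posX, int &posY)
{
    const float minX = 0.0f, maxX = 640.0f;  // stand-ins for mnMinX / mnMaxX
    const float minY = 0.0f, maxY = 480.0f;  // stand-ins for mnMinY / mnMaxY
    const float widthInv  = GRID_COLS / (maxX - minX);   // cells per pixel in x
    const float heightInv = GRID_ROWS / (maxY - minY);   // cells per pixel in y
    posX = static_cast<int>(std::round((x - minX) * widthInv));
    posY = static_cast<int>(std::round((y - minY) * heightInv));
    return posX >= 0 && posX < GRID_COLS && posY >= 0 && posY < GRID_ROWS;
}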
Frame.h
/**
* This file is part of ORB-SLAM2.
*
* Copyright (C) 2014-2016 Raúl Mur-Artal <raulmur at unizar dot es> (University of Zaragoza)
* For more information see <https://github.com/raulmur/ORB_SLAM2>
*
* ORB-SLAM2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
*/
/**
 * Frame manages a single image frame: extracting feature points, checking
 * whether feature points lie within the camera's field of view,
 * back-projecting feature points into 3D space, and so on.
 * 1. Every frame has a unique frame ID.
 * 2. It stores the keypoint coordinates both before and after undistortion.
 * 3. It holds the camera intrinsics and distortion parameters.
 * 4. The image is divided into many small grid cells, and the keypoints of
 *    each cell are stored separately.
 * 5. It keeps a pointer to the frame's reference keyframe.
 * 6. It stores the camera pose (rotation and translation of the camera
 *    center) for the frame.
 */
#ifndef FRAME_H
#define FRAME_H
#include<vector>
#include "MapPoint.h"
#include "Thirdparty/DBoW2/DBoW2/BowVector.h"
#include "Thirdparty/DBoW2/DBoW2/FeatureVector.h"
#include "ORBVocabulary.h"
#include "KeyFrame.h"
#include "ORBextractor.h"
#include <opencv2/opencv.hpp>
namespace ORB_SLAM2
{
/**
 * Each frame is divided into 48 x 64 (rows x cols) grid cells.
 */
#define FRAME_GRID_ROWS 48
#define FRAME_GRID_COLS 64
class MapPoint;
class KeyFrame;
class Frame
{
public:
Frame();
// Copy constructor.
Frame(const Frame &frame);
// Constructor for stereo cameras.
Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth);
// Constructor for RGB-D cameras.
Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth);
// Constructor for Monocular cameras.
Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth);
// Extract ORB on the image. 0 for left image and 1 for right image.
void ExtractORB(int flag, const cv::Mat &im);
// Compute Bag of Words representation.
void ComputeBoW();
// Set the camera pose.
void SetPose(cv::Mat Tcw);
// Computes rotation, translation and camera center matrices from the camera pose.
void UpdatePoseMatrices();
// Returns the camera center.
inline cv::Mat GetCameraCenter(){
return mOw.clone();
}
// Returns inverse of rotation
inline cv::Mat GetRotationInverse(){
return mRwc.clone();
}
// Check if a MapPoint is in the frustum of the camera
// and fill variables of the MapPoint to be used by the tracking
bool isInFrustum(MapPoint* pMP, float viewingCosLimit);
// Compute the cell of a keypoint (return false if outside the grid)
bool PosInGrid(const cv::KeyPoint &kp, int &posX, int &posY);
///Collect the keypoints inside the window of radius r around (x, y), restricted to pyramid levels minLevel through maxLevel
vector<size_t> GetFeaturesInArea(const float &x, const float &y, const float &r, const int minLevel=-1, const int maxLevel=-1) const;
// Search a match for each keypoint in the left image to a keypoint in the right image.
// If there is a match, depth is computed and the right coordinate associated to the left keypoint is stored.
void ComputeStereoMatches();
// Associate a "right" coordinate to a keypoint if there is valid depth in the depthmap.
void ComputeStereoFromRGBD(const cv::Mat &imDepth);
// Backprojects a keypoint (if stereo/depth info available) into 3D world coordinates.
cv::Mat UnprojectStereo(const int &i);
public:
// Vocabulary used for relocalization.
ORBVocabulary* mpORBvocabulary;
// Feature extractor. The right is used only in the stereo case.
ORBextractor* mpORBextractorLeft, *mpORBextractorRight;
// Frame timestamp.
double mTimeStamp;
// Calibration matrix and OpenCV distortion parameters.
cv::Mat mK;
static float fx;
static float fy;
static float cx;
static float cy;
static float invfx;
static float invfy;
cv::Mat mDistCoef;
// Stereo baseline multiplied by fx.
float mbf;
// Stereo baseline in meters.
float mb;
// Threshold close/far points. Close points are inserted from 1 view.
// Far points are inserted as in the monocular case from 2 views.
float mThDepth;
// Number of KeyPoints.
int N;
// Vector of keypoints (original for visualization) and undistorted (actually used by the system).
// In the stereo case, mvKeysUn is redundant as images must be rectified.
// In the RGB-D case, RGB images can be distorted.
std::vector<cv::KeyPoint> mvKeys, mvKeysRight;
std::vector<cv::KeyPoint> mvKeysUn;
// Corresponding stereo coordinate and depth for each keypoint.
// "Monocular" keypoints have a negative value.
std::vector<float> mvuRight;
std::vector<float> mvDepth;
// Bag of Words Vector structures.
DBoW2::BowVector mBowVec;
DBoW2::FeatureVector mFeatVec;
// ORB descriptor, each row associated to a keypoint.
cv::Mat mDescriptors, mDescriptorsRight;
// MapPoints associated to keypoints, NULL pointer if no association.
std::vector<MapPoint*> mvpMapPoints;
// Flag to identify outlier associations.
std::vector<bool> mvbOutlier;
// Keypoints are assigned to cells in a grid to reduce matching complexity when projecting MapPoints.
static float mfGridElementWidthInv;
static float mfGridElementHeightInv;
/// mGrid is a 2D array whose entries are of type std::vector<std::size_t>;
/// the indices of all keypoints falling into one grid cell are stored in one such vector
std::vector<std::size_t> mGrid[FRAME_GRID_COLS][FRAME_GRID_ROWS];
// Camera pose.
cv::Mat mTcw;
// Current and Next Frame id.
static long unsigned int nNextId;
long unsigned int mnId;
// Reference Keyframe.
KeyFrame* mpReferenceKF;
// Scale pyramid info.
int mnScaleLevels;
float mfScaleFactor;
float mfLogScaleFactor;
vector<float> mvScaleFactors;
vector<float> mvInvScaleFactors;
vector<float> mvLevelSigma2;
vector<float> mvInvLevelSigma2;
// Undistorted Image Bounds (computed once).
static float mnMinX;
static float mnMaxX;
static float mnMinY;
static float mnMaxY;
static bool mbInitialComputations;
private:
// Undistort keypoints given OpenCV distortion parameters.
// Only for the RGB-D case. Stereo must be already rectified!
// (called in the constructor).
void UndistortKeyPoints();
// Computes image bounds for the undistorted image (called in the constructor).
void ComputeImageBounds(const cv::Mat &imLeft);
// Assign keypoints to the grid for speed up feature matching (called in the constructor).
void AssignFeaturesToGrid();
// Rotation, translation and camera center
/// Left-multiplying by mRcw transforms a point from world coordinates to camera coordinates
cv::Mat mRcw;
cv::Mat mtcw;
cv::Mat mRwc;
/// Camera center position in the world coordinate frame.
cv::Mat mOw; //==mtwc
};
}// namespace ORB_SLAM
#endif // FRAME_H
Frame.cpp
/**
* This file is part of ORB-SLAM2.
*
* Copyright (C) 2014-2016 Raúl Mur-Artal <raulmur at unizar dot es> (University of Zaragoza)
* For more information see <https://github.com/raulmur/ORB_SLAM2>
*
* ORB-SLAM2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
*/
#include "Frame.h"
#include "Converter.h"
#include "ORBmatcher.h"
#include <thread>
namespace ORB_SLAM2
{
long unsigned int Frame::nNextId=0;
bool Frame::mbInitialComputations=true;
float Frame::cx, Frame::cy, Frame::fx, Frame::fy, Frame::invfx, Frame::invfy;
float Frame::mnMinX, Frame::mnMinY, Frame::mnMaxX, Frame::mnMaxY;
float Frame::mfGridElementWidthInv, Frame::mfGridElementHeightInv;
Frame::Frame()
{}
//Copy Constructor
/**
 * Copies every member of the given Frame; cv::Mat members are deep-copied
 * with clone(), and the grid and pose are copied explicitly in the body.
 */
Frame::Frame(const Frame &frame)
:mpORBvocabulary(frame.mpORBvocabulary), mpORBextractorLeft(frame.mpORBextractorLeft), mpORBextractorRight(frame.mpORBextractorRight),
mTimeStamp(frame.mTimeStamp), mK(frame.mK.clone()), mDistCoef(frame.mDistCoef.clone()),
mbf(frame.mbf), mb(frame.mb), mThDepth(frame.mThDepth), N(frame.N), mvKeys(frame.mvKeys),
mvKeysRight(frame.mvKeysRight), mvKeysUn(frame.mvKeysUn), mvuRight(frame.mvuRight),
mvDepth(frame.mvDepth), mBowVec(frame.mBowVec), mFeatVec(frame.mFeatVec),
mDescriptors(frame.mDescriptors.clone()), mDescriptorsRight(frame.mDescriptorsRight.clone()),
mvpMapPoints(frame.mvpMapPoints), mvbOutlier(frame.mvbOutlier), mnId(frame.mnId),
mpReferenceKF(frame.mpReferenceKF), mnScaleLevels(frame.mnScaleLevels),
mfScaleFactor(frame.mfScaleFactor), mfLogScaleFactor(frame.mfLogScaleFactor),
mvScaleFactors(frame.mvScaleFactors), mvInvScaleFactors(frame.mvInvScaleFactors),
mvLevelSigma2(frame.mvLevelSigma2), mvInvLevelSigma2(frame.mvInvLevelSigma2)
{
for(int i=0;i<FRAME_GRID_COLS;i++)
for(int j=0; j<FRAME_GRID_ROWS; j++)
mGrid[i][j]=frame.mGrid[i][j];
if(!frame.mTcw.empty())
SetPose(frame.mTcw);
}
/**
 * Stereo constructor:
 * 1. Assign the frame ID.
 * 2. Read the pyramid level count and scale factors from the ORB extractor.
 * 3. Extract ORB features in two threads; keypoints go into mvKeys/mvKeysRight.
 * 4. Undistort the keypoints into mvKeysUn.
 * 5. Find, for each keypoint in the left image, its match in the right image.
 * 6. Compute the bounds of the undistorted image.
 * 7. Assign the keypoints to their cells in mGrid.
 */
Frame::Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth)
:mpORBvocabulary(voc),mpORBextractorLeft(extractorLeft),mpORBextractorRight(extractorRight), mTimeStamp(timeStamp), mK(K.clone()),mDistCoef(distCoef.clone()), mbf(bf), mThDepth(thDepth),
mpReferenceKF(static_cast<KeyFrame*>(NULL))
{
// Frame ID
mnId=nNextId++;
// Scale Level Info
mnScaleLevels = mpORBextractorLeft->GetLevels();
mfScaleFactor = mpORBextractorLeft->GetScaleFactor();
mfLogScaleFactor = log(mfScaleFactor);
mvScaleFactors = mpORBextractorLeft->GetScaleFactors();
mvInvScaleFactors = mpORBextractorLeft->GetInverseScaleFactors();
mvLevelSigma2 = mpORBextractorLeft->GetScaleSigmaSquares();
mvInvLevelSigma2 = mpORBextractorLeft->GetInverseScaleSigmaSquares();
// ORB extraction
thread threadLeft(&Frame::ExtractORB,this,0,imLeft);
thread threadRight(&Frame::ExtractORB,this,1,imRight);
threadLeft.join();
threadRight.join();
N = mvKeys.size();
if(mvKeys.empty())
return;
UndistortKeyPoints();
ComputeStereoMatches();
mvpMapPoints = vector<MapPoint*>(N,static_cast<MapPoint*>(NULL));
mvbOutlier = vector<bool>(N,false);
// This is done only for the first Frame (or after a change in the calibration)
if(mbInitialComputations)
{
ComputeImageBounds(imLeft);
mfGridElementWidthInv=static_cast<float>(FRAME_GRID_COLS)/(mnMaxX-mnMinX);
mfGridElementHeightInv=static_cast<float>(FRAME_GRID_ROWS)/(mnMaxY-mnMinY);
fx = K.at<float>(0,0);
fy = K.at<float>(1,1);
cx = K.at<float>(0,2);
cy = K.at<float>(1,2);
invfx = 1.0f/fx;
invfy = 1.0f/fy;
mbInitialComputations=false;
}
mb = mbf/fx;
AssignFeaturesToGrid();
}
Frame::Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth)
:mpORBvocabulary(voc),mpORBextractorLeft(extractor),mpORBextractorRight(static_cast<ORBextractor*>(NULL)),
mTimeStamp(timeStamp), mK(K.clone()),mDistCoef(distCoef.clone()), mbf(bf), mThDepth(thDepth)
{
// Frame ID
mnId=nNextId++;
// Scale Level Info
mnScaleLevels = mpORBextractorLeft->GetLevels();
mfScaleFactor = mpORBextractorLeft->GetScaleFactor();
mfLogScaleFactor = log(mfScaleFactor);
mvScaleFactors = mpORBextractorLeft->GetScaleFactors();
mvInvScaleFactors = mpORBextractorLeft->GetInverseScaleFactors();
mvLevelSigma2 = mpORBextractorLeft->GetScaleSigmaSquares();
mvInvLevelSigma2 = mpORBextractorLeft->GetInverseScaleSigmaSquares();
// ORB extraction
ExtractORB(0,imGray);
N = mvKeys.size();
if(mvKeys.empty())
return;
UndistortKeyPoints();
ComputeStereoFromRGBD(imDepth);
mvpMapPoints = vector<MapPoint*>(N,static_cast<MapPoint*>(NULL));
mvbOutlier = vector<bool>(N,false);
// This is done only for the first Frame (or after a change in the calibration)
if(mbInitialComputations)
{
ComputeImageBounds(imGray);
mfGridElementWidthInv=static_cast<float>(FRAME_GRID_COLS)/static_cast<float>(mnMaxX-mnMinX);
mfGridElementHeightInv=static_cast<float>(FRAME_GRID_ROWS)/static_cast<float>(mnMaxY-mnMinY);
fx = K.at<float>(0,0);
fy = K.at<float>(1,1);
cx = K.at<float>(0,2);
cy = K.at<float>(1,2);
invfx = 1.0f/fx;
invfy = 1.0f/fy;
mbInitialComputations=false;
}
mb = mbf/fx;
AssignFeaturesToGrid();
}
Frame::Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth)
:mpORBvocabulary(voc),mpORBextractorLeft(extractor),mpORBextractorRight(static_cast<ORBextractor*>(NULL)),
mTimeStamp(timeStamp), mK(K.clone()),mDistCoef(distCoef.clone()), mbf(bf), mThDepth(thDepth)
{
// Frame ID
mnId=nNextId++;
// Scale Level Info
mnScaleLevels = mpORBextractorLeft->GetLevels();
mfScaleFactor = mpORBextractorLeft->GetScaleFactor();
mfLogScaleFactor = log(mfScaleFactor);
mvScaleFactors = mpORBextractorLeft->GetScaleFactors();
mvInvScaleFactors = mpORBextractorLeft->GetInverseScaleFactors();
mvLevelSigma2 = mpORBextractorLeft->GetScaleSigmaSquares();
mvInvLevelSigma2 = mpORBextractorLeft->GetInverseScaleSigmaSquares();
// ORB extraction
ExtractORB(0,imGray);
N = mvKeys.size();
if(mvKeys.empty())
return;
UndistortKeyPoints();
// Set no stereo information
mvuRight = vector<float>(N,-1);
mvDepth = vector<float>(N,-1);
mvpMapPoints = vector<MapPoint*>(N,static_cast<MapPoint*>(NULL));
mvbOutlier = vector<bool>(N,false);
// This is done only for the first Frame (or after a change in the calibration)
if(mbInitialComputations)
{
ComputeImageBounds(imGray);
mfGridElementWidthInv=static_cast<float>(FRAME_GRID_COLS)/static_cast<float>(mnMaxX-mnMinX);
mfGridElementHeightInv=static_cast<float>(FRAME_GRID_ROWS)/static_cast<float>(mnMaxY-mnMinY);
fx = K.at<float>(0,0);
fy = K.at<float>(1,1);
cx = K.at<float>(0,2);
cy = K.at<float>(1,2);
invfx = 1.0f/fx;
invfy = 1.0f/fy;
mbInitialComputations=false;
}
mb = mbf/fx;
AssignFeaturesToGrid();
}
/**
 * Assign each undistorted keypoint to its grid cell.
 */
void Frame::AssignFeaturesToGrid()
{
int nReserve = 0.5f*N/(FRAME_GRID_COLS*FRAME_GRID_ROWS);
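/// Note: for a typical N (one to a few thousand keypoints spread over
/// 64*48 = 3072 cells) this integer expression evaluates to 0, so the
/// reserve is effectively a no-op and the cell vectors grow on demand.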
for(unsigned int i=0; i<FRAME_GRID_COLS;i++)
for (unsigned int j=0; j<FRAME_GRID_ROWS;j++)
mGrid[i][j].reserve(nReserve);
for(int i=0;i<N;i++)
{
const cv::KeyPoint &kp = mvKeysUn[i];
///Find which grid cell the keypoint falls into and store its index there
int nGridPosX, nGridPosY;
if(PosInGrid(kp,nGridPosX,nGridPosY))
mGrid[nGridPosX][nGridPosY].push_back(i);
}
}
/**
 * Extract ORB keypoints and descriptors (flag 0: left image, flag 1: right image).
 */
void Frame::ExtractORB(int flag, const cv::Mat &im)
{
if(flag==0)
(*mpORBextractorLeft)(im,cv::Mat(),mvKeys,mDescriptors);
else
(*mpORBextractorRight)(im,cv::Mat(),mvKeysRight,mDescriptorsRight);
}
/**
 * Set the camera pose Tcw of this frame and update the derived pose matrices.
 */
void Frame::SetPose(cv::Mat Tcw)
{
mTcw = Tcw.clone();
UpdatePoseMatrices();
}
/**
 * Update the rotation, translation and camera center (mRcw, mRwc, mtcw, mOw) from mTcw.
 */
void Frame::UpdatePoseMatrices()
{
mRcw = mTcw.rowRange(0,3).colRange(0,3);
mRwc = mRcw.t();
mtcw = mTcw.rowRange(0,3).col(3);
mOw = -mRcw.t()*mtcw;
}
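/// With the homogeneous pose Tcw = [Rcw | tcw; 0 | 1], a world point Pw maps
/// into the camera frame as Pc = Rcw*Pw + tcw. Setting Pc = 0 and solving for
/// Pw gives the camera center in world coordinates, mOw = -Rcw^T * tcw, which
/// is exactly the translation part twc of the inverse transform Twc.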
/**
 * @param pMP the map point to test
 * @param viewingCosLimit cosine of the maximum allowed viewing angle
 * @brief
 * 1. Project the map point onto the image plane; the projection must not fall outside the image bounds.
 * 2. Let PO be the vector from the camera center to the point; the angle between PO and the point's mean viewing direction must not exceed the limit (e.g. 60 degrees for viewingCosLimit = 0.5).
 * 3. The distance dist from the point to the camera center must lie within the point's scale-invariance range [minDistance, maxDistance].
 * 4. Predict the pyramid level of the point from dist.
 */
bool Frame::isInFrustum(MapPoint *pMP, float viewingCosLimit)
{
pMP->mbTrackInView = false;
// 3D in absolute coordinates
cv::Mat P = pMP->GetWorldPos();
// 3D in camera coordinates
const cv::Mat Pc = mRcw*P+mtcw;
const float &PcX = Pc.at<float>(0);
const float &PcY= Pc.at<float>(1);
const float &PcZ = Pc.at<float>(2);
// Check positive depth
if(PcZ<0.0f)
return false;
// Project in image and check it is not outside
const float invz = 1.0f/PcZ;
const float u=fx*PcX*invz+cx;
const float v=fy*PcY*invz+cy;
/// Check that (u, v) lies inside the undistorted image bounds
if(u<mnMinX || u>mnMaxX)
return false;
if(v<mnMinY || v>mnMaxY)
return false;
// Check distance is in the scale invariance region of the MapPoint
///Each keypoint was extracted at some pyramid level, so the corresponding map point is only reliably recognizable within a certain distance range
const float maxDistance = pMP->GetMaxDistanceInvariance();
const float minDistance = pMP->GetMinDistanceInvariance();
///Vector from the camera center to the point
const cv::Mat PO = P-mOw;
/// Distance from the point to the camera center
const float dist = cv::norm(PO);
if(dist<minDistance || dist>maxDistance)
return false;
// Check viewing angle
/// Pn is the point's mean viewing direction (averaged over the keyframes observing it);
/// if the angle between the current viewing ray PO and Pn is larger than the
/// limit (acos(viewingCosLimit)), the map point is not considered visible in this frame
cv::Mat Pn = pMP->GetNormal();
const float viewCos = PO.dot(Pn)/dist;
if(viewCos<viewingCosLimit)
return false;
// Predict scale in the image
///Predict the pyramid level of the keypoint from its distance to the camera
const int nPredictedLevel = pMP->PredictScale(dist,this);
// Data used by the tracking
pMP->mbTrackInView = true;
pMP->mTrackProjX = u;
pMP->mTrackProjXR = u - mbf*invz;
pMP->mTrackProjY = v;
pMP->mnTrackScaleLevel= nPredictedLevel;
pMP->mTrackViewCos = viewCos;
return true;
}
/**
 * Collect the indices of the keypoints inside a square window centered at (x, y) with half side length r, restricted to pyramid levels minLevel through maxLevel.
 * 1. Compute the cell range nMinCellX..nMaxCellX and nMinCellY..nMaxCellY; return early if the window falls outside the grid.
 * 2. Scan the cells of mGrid in that range and return the indices of the keypoints lying inside the window.
 * */
vector<size_t> Frame::GetFeaturesInArea(const float &x, const float &y, const float &r, const int minLevel, const int maxLevel) const
{
vector<size_t> vIndices;
vIndices.reserve(N);
///Compute the range of grid cells covered by the search window (x, y, r)
const int nMinCellX = max(0,(int)floor((x-mnMinX-r)*mfGridElementWidthInv));
if(nMinCellX>=FRAME_GRID_COLS)
return vIndices;
const int nMaxCellX = min((int)FRAME_GRID_COLS-1,(int)ceil((x-mnMinX+r)*mfGridElementWidthInv));
if(nMaxCellX<0)
return vIndices;
const int nMinCellY = max(0,(int)floor((y-mnMinY-r)*mfGridElementHeightInv));
if(nMinCellY>=FRAME_GRID_ROWS)
return vIndices;
const int nMaxCellY = min((int)FRAME_GRID_ROWS-1,(int)ceil((y-mnMinY+r)*mfGridElementHeightInv));
if(nMaxCellY<0)
return vIndices;
const bool bCheckLevels = (minLevel>0) || (maxLevel>=0);
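/// minLevel/maxLevel default to -1 ("no filtering"); since octave is always
/// >= 0, a minLevel of 0 needs no check either, hence the > 0 test.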
for(int ix = nMinCellX; ix<=nMaxCellX; ix++)
{
for(int iy = nMinCellY; iy<=nMaxCellY; iy++)
{
const vector<size_t> vCell = mGrid[ix][iy];
if(vCell.empty())
continue;
for(size_t j=0, jend=vCell.size(); j<jend; j++)
{
const cv::KeyPoint &kpUn = mvKeysUn[vCell[j]];
if(bCheckLevels)
{
if(kpUn.octave<minLevel)
continue;
if(maxLevel>=0)
if(kpUn.octave>maxLevel)
continue;
}
const float distx = kpUn.pt.x-x;
const float disty = kpUn.pt.y-y;
if(fabs(distx)<r && fabs(disty)<r)
vIndices.push_back(vCell[j]);
}
}
}
return vIndices;
}
/**
 * Compute the grid cell (posX, posY) of a keypoint from its image coordinates; return false if it falls outside the grid.
 */
bool Frame::PosInGrid(const cv::KeyPoint &kp, int &posX, int &posY)
{
posX = round((kp.pt.x-mnMinX)*mfGridElementWidthInv);
posY = round((kp.pt.y-mnMinY)*mfGridElementHeightInv);
//Keypoint's coordinates are undistorted, which could cause to go out of the image
if(posX<0 || posX>=FRAME_GRID_COLS || posY<0 || posY>=FRAME_GRID_ROWS)
return false;
return true;
}
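/// Worked example, assuming a 640x480 undistorted image: mfGridElementWidthInv
/// = 64/640 = 0.1, so a keypoint at x = 327.0 lands in column round(32.7) = 33.
/// Because of the rounding, a border keypoint (e.g. x = 637.0) can yield
/// posX = 64 == FRAME_GRID_COLS, which is why the range check above is needed.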
/**
 * Compute the bag-of-words representation (mBowVec, mFeatVec) from the descriptors mDescriptors
 */
void Frame::ComputeBoW()
{
if(mBowVec.empty())
{
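/// mBowVec maps vocabulary word ids to weights; mFeatVec groups this frame's
/// feature indices by vocabulary-tree node. The 4 is DBoW2's "levelsup"
/// argument: nodes are taken 4 levels up from the leaves, which ORBmatcher
/// later uses to restrict matching to features under the same node.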
vector<cv::Mat> vCurrentDesc = Converter::toDescriptorVector(mDescriptors);
mpORBvocabulary->transform(vCurrentDesc,mBowVec,mFeatVec,4);
}
}
/**
 * Undistort the keypoints using the OpenCV distortion model
 */
void Frame::UndistortKeyPoints()
{
if(mDistCoef.at<float>(0)==0.0)
{
mvKeysUn=mvKeys;
return;
}
// Fill matrix with points
cv::Mat mat(N,2,CV_32F);
for(int i=0; i<N; i++)
{
mat.at<float>(i,0)=mvKeys[i].pt.x;
mat.at<float>(i,1)=mvKeys[i].pt.y;
}
// Undistort points
mat=mat.reshape(2);
/// Arguments: input points, output points, camera matrix, distortion coefficients, rectification transform R (none here), new projection matrix P (mK, so the output is back in pixel coordinates)
cv::undistortPoints(mat,mat,mK,mDistCoef,cv::Mat(),mK);
mat=mat.reshape(1);
// Fill undistorted keypoint vector
/// cv::Mat::reshape(int cn, int rows=0): cn is the new number of channels,
/// rows the new number of rows (0 keeps it unchanged); reshape(2)/reshape(1)
/// toggles between the Nx1 2-channel layout undistortPoints expects and the plain Nx2 layout
mvKeysUn.resize(N);
for(int i=0; i<N; i++)
{
cv::KeyPoint kp = mvKeys[i];
kp.pt.x=mat.at<float>(i,0);
kp.pt.y=mat.at<float>(i,1);
mvKeysUn[i]=kp;
}
}
/**
 * Take the four corners of the original image, undistort them, and use the
 * resulting coordinates as the bounds of the undistorted image
 */
void Frame::ComputeImageBounds(const cv::Mat &imLeft)
{
if(mDistCoef.at<float>(0)!=0.0)
{
cv::Mat mat(4,2,CV_32F);
mat.at<float>(0,0)=0.0; mat.at<float>(0,1)=0.0;
mat.at<float>(1,0)=imLeft.cols; mat.at<float>(1,1)=0.0;
mat.at<float>(2,0)=0.0; mat.at<float>(2,1)=imLeft.rows;
mat.at<float>(3,0)=imLeft.cols; mat.at<float>(3,1)=imLeft.rows;
// Undistort corners
mat=mat.reshape(2);
cv::undistortPoints(mat,mat,mK,mDistCoef,cv::Mat(),mK);
mat=mat.reshape(1);
mnMinX = min(mat.at<float>(0,0),mat.at<float>(2,0));
mnMaxX = max(mat.at<float>(1,0),mat.at<float>(3,0));
mnMinY = min(mat.at<float>(0,1),mat.at<float>(1,1));
mnMaxY = max(mat.at<float>(2,1),mat.at<float>(3,1));
}
else
{
mnMinX = 0.0f;
mnMaxX = imLeft.cols;
mnMinY = 0.0f;
mnMaxY = imLeft.rows;
}
}
/**
 * Match the keypoints between the left and right images; for every matched
 * left keypoint, store its right-image coordinate and its depth
 */
void Frame::ComputeStereoMatches()
{
mvuRight = vector<float>(N,-1.0f);
mvDepth = vector<float>(N,-1.0f);
const int thOrbDist = (ORBmatcher::TH_HIGH+ORBmatcher::TH_LOW)/2;
const int nRows = mpORBextractorLeft->mvImagePyramid[0].rows;
//Assign keypoints to row table
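/// With rectified stereo, the match of a left keypoint lies on (nearly) the
/// same image row in the right image. Each right keypoint is therefore
/// registered in every row within +-2*scaleFactor of its y coordinate, so the
/// search below only scans vRowIndices[vL] instead of all Nr right keypoints.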
vector<vector<size_t> > vRowIndices(nRows,vector<size_t>());
for(int i=0; i<nRows; i++)
vRowIndices[i].reserve(200);
const int Nr = mvKeysRight.size();
for(int iR=0; iR<Nr; iR++)
{
const cv::KeyPoint &kp = mvKeysRight[iR];
const float &kpY = kp.pt.y;
const float r = 2.0f*mvScaleFactors[mvKeysRight[iR].octave];
const int maxr = ceil(kpY+r);
const int minr = floor(kpY-r);
for(int yi=minr;yi<=maxr;yi++)
vRowIndices[yi].push_back(iR);
}
// Set limits for search
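/// For a rectified pair, disparity = mbf / z (with mbf = fx * baseline), so
/// requiring z >= mb bounds the disparity by maxD = mbf / mb = fx, and
/// minD = 0 enforces that the match lies at uR <= uL in the right image.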
const float minZ = mb;
const float minD = 0;
const float maxD = mbf/minZ;
// For each left keypoint search a match in the right image
vector<pair<int, int> > vDistIdx;
vDistIdx.reserve(N);
for(int iL=0; iL<N; iL++)
{
const cv::KeyPoint &kpL = mvKeys[iL];
const int &levelL = kpL.octave;
const float &vL = kpL.pt.y;
const float &uL = kpL.pt.x;
const vector<size_t> &vCandidates = vRowIndices[vL];
if(vCandidates.empty())
continue;
const float minU = uL-maxD;
const float maxU = uL-minD;
if(maxU<0)
continue;
int bestDist = ORBmatcher::TH_HIGH;
size_t bestIdxR = 0;
const cv::Mat &dL = mDescriptors.row(iL);
// Compare descriptor to right keypoints
for(size_t iC=0; iC<vCandidates.size(); iC++)
{
const size_t iR = vCandidates[iC];
const cv::KeyPoint &kpR = mvKeysRight[iR];
if(kpR.octave<levelL-1 || kpR.octave>levelL+1)
continue;
const float &uR = kpR.pt.x;
if(uR>=minU && uR<=maxU)
{
const cv::Mat &dR = mDescriptorsRight.row(iR);
const int dist = ORBmatcher::DescriptorDistance(dL,dR);
if(dist<bestDist)
{
bestDist = dist;
bestIdxR = iR;
}
}
}
// Subpixel match by correlation
if(bestDist<thOrbDist)
{
// coordinates in image pyramid at keypoint scale
const float uR0 = mvKeysRight[bestIdxR].pt.x;
const float scaleFactor = mvInvScaleFactors[kpL.octave];
const float scaleduL = round(kpL.pt.x*scaleFactor);
const float scaledvL = round(kpL.pt.y*scaleFactor);
const float scaleduR0 = round(uR0*scaleFactor);
// sliding window search
const int w = 5;
cv::Mat IL = mpORBextractorLeft->mvImagePyramid[kpL.octave].rowRange(scaledvL-w,scaledvL+w+1).colRange(scaleduL-w,scaleduL+w+1);
IL.convertTo(IL,CV_32F);
IL = IL - IL.at<float>(w,w) *cv::Mat::ones(IL.rows,IL.cols,CV_32F);
int bestDist = INT_MAX;
int bestincR = 0;
const int L = 5;
vector<float> vDists;
vDists.resize(2*L+1);
const float iniu = scaleduR0+L-w;
const float endu = scaleduR0+L+w+1;
if(iniu<0 || endu >= mpORBextractorRight->mvImagePyramid[kpL.octave].cols)
continue;
for(int incR=-L; incR<=+L; incR++)
{
cv::Mat IR = mpORBextractorRight->mvImagePyramid[kpL.octave].rowRange(scaledvL-w,scaledvL+w+1).colRange(scaleduR0+incR-w,scaleduR0+incR+w+1);
IR.convertTo(IR,CV_32F);
IR = IR - IR.at<float>(w,w) *cv::Mat::ones(IR.rows,IR.cols,CV_32F);
float dist = cv::norm(IL,IR,cv::NORM_L1);
if(dist<bestDist)
{
bestDist = dist;
bestincR = incR;
}
vDists[L+incR] = dist;
}
if(bestincR==-L || bestincR==L)
continue;
// Sub-pixel match (Parabola fitting)
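/// Fit a parabola through the three SAD costs around the best integer shift r:
/// the minimum of the parabola through (r-1, dist1), (r, dist2), (r+1, dist3)
/// lies at r + deltaR with deltaR = (dist1 - dist3) / (2*(dist1 + dist3 - 2*dist2)).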
const float dist1 = vDists[L+bestincR-1];
const float dist2 = vDists[L+bestincR];
const float dist3 = vDists[L+bestincR+1];
const float deltaR = (dist1-dist3)/(2.0f*(dist1+dist3-2.0f*dist2));
if(deltaR<-1 || deltaR>1)
continue;
// Re-scaled coordinate
float bestuR = mvScaleFactors[kpL.octave]*((float)scaleduR0+(float)bestincR+deltaR);
float disparity = (uL-bestuR);
if(disparity>=minD && disparity<maxD)
{
if(disparity<=0)
{
disparity=0.01;
bestuR = uL-0.01;
}
mvDepth[iL]=mbf/disparity;
mvuRight[iL] = bestuR;
vDistIdx.push_back(pair<int,int>(bestDist,iL));
}
}
}
sort(vDistIdx.begin(),vDistIdx.end());
const float median = vDistIdx[vDistIdx.size()/2].first;
const float thDist = 1.5f*1.4f*median;
for(int i=vDistIdx.size()-1;i>=0;i--)
{
if(vDistIdx[i].first<thDist)
break;
else
{
mvuRight[vDistIdx[i].second]=-1;
mvDepth[vDistIdx[i].second]=-1;
}
}
}
/**
 * 1. Read the depth of each keypoint from the depth image at the original (distorted) keypoint coordinates;
 * 2. From the depth d and the baseline term mbf, compute the virtual right-image coordinate of the undistorted keypoint
 */
void Frame::ComputeStereoFromRGBD(const cv::Mat &imDepth)
{
mvuRight = vector<float>(N,-1);
mvDepth = vector<float>(N,-1);
for(int i=0; i<N; i++)
{
const cv::KeyPoint &kp = mvKeys[i];
const cv::KeyPoint &kpU = mvKeysUn[i];
const float &v = kp.pt.y;
const float &u = kp.pt.x;
const float d = imDepth.at<float>(v,u);
if(d>0)
{
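/// Disparity for a rectified pair is mbf / z, so the "virtual" right-image
/// coordinate of the keypoint is uR = uL - mbf / d.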
mvDepth[i] = d;
mvuRight[i] = kpU.pt.x-mbf/d;
}
}
}
/* Back-project keypoint i into world coordinates */
cv::Mat Frame::UnprojectStereo(const int &i)
{
const float z = mvDepth[i];
if(z>0)
{
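/// Invert the pinhole projection u = fx*x/z + cx, v = fy*y/z + cy to get the
/// camera-frame point, then move it into the world frame with
/// x3Dw = mRwc * x3Dc + mOw.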
const float u = mvKeysUn[i].pt.x;
const float v = mvKeysUn[i].pt.y;
const float x = (u-cx)*z*invfx;
const float y = (v-cy)*z*invfy;
cv::Mat x3Dc = (cv::Mat_<float>(3,1) << x, y, z);
return mRwc*x3Dc+mOw;
}
else
return cv::Mat();
}
} //namespace ORB_SLAM