初始化跟踪器 Tracking 对象(它运行在主线程中,此处并不单独开启新线程)
// Construct the Tracking object, handing it the shared system components;
// it runs in the caller's (main) thread — no dedicated thread is started here.
mpTracker = new Tracking(this, mpVocabulary, mpFrameDrawer, mpMapDrawer,
mpMap, mpKeyFrameDatabase, strSettingsFile, mSensor);
1.变量赋值
// Tracking constructor. Stores raw pointers to the collaborating components
// (System, vocabulary, drawers, map, keyframe database) and initializes the
// tracking state via the member-initializer list; the body that follows reads
// the settings file at strSettingPath.
//   sensor - sensor type (compared against System::STEREO / System::RGBD below)
Tracking::Tracking(System *pSys, ORBVocabulary* pVoc, FrameDrawer *pFrameDrawer, MapDrawer *pMapDrawer, Map *pMap, KeyFrameDatabase* pKFDB, const string &strSettingPath, const int sensor):
mState(NO_IMAGES_YET), mSensor(sensor), mbOnlyTracking(false), mbVO(false), mpORBVocabulary(pVoc), // no frame processed yet; full SLAM mode (not localization-only)
mpKeyFrameDB(pKFDB), mpInitializer(static_cast<Initializer*>(NULL)), mpSystem(pSys), mpViewer(NULL), // initializer/viewer start unset
mpFrameDrawer(pFrameDrawer), mpMapDrawer(pMapDrawer), mpMap(pMap), mnLastRelocFrameId(0) // no relocalization has happened yet
2.读取配置文件并输出配置文件信息
//【1】------------------ Camera intrinsic matrix K ------------------------
//         |fx  0 cx|
//     K = | 0 fy cy|
//         | 0  0  1|
// NOTE(review): fSettings is presumably the cv::FileStorage opened from
// strSettingPath earlier in this constructor (not shown in this excerpt).
float fx = fSettings["Camera.fx"];
float fy = fSettings["Camera.fy"];
float cx = fSettings["Camera.cx"];
float cy = fSettings["Camera.cy"];
cv::Mat K = cv::Mat::eye(3,3,CV_32F);// start from the 3x3 identity
K.at<float>(0,0) = fx;
K.at<float>(1,1) = fy;
K.at<float>(0,2) = cx;
K.at<float>(1,2) = cy;
// Copy into the member mK so the intrinsics are accessible class-wide.
K.copyTo(mK);
// 【2】------------- Distortion-correction parameters ---------------------
cv::Mat DistCoef(4,1,CV_32F);// radial (k1,k2) and tangential (p1,p2) coefficients
DistCoef.at<float>(0) = fSettings["Camera.k1"];
DistCoef.at<float>(1) = fSettings["Camera.k2"];
DistCoef.at<float>(2) = fSettings["Camera.p1"];
DistCoef.at<float>(3) = fSettings["Camera.p2"];
const float k3 = fSettings["Camera.k3"];
if(k3!=0)
{
// A third radial coefficient is optional; grow the vector to 5x1 to hold it.
DistCoef.resize(5);
DistCoef.at<float>(4) = k3;
}
// Copy into the member mDistCoef.
DistCoef.copyTo(mDistCoef);
// Stereo baseline times fx (per the settings convention "Camera.bf").
mbf = fSettings["Camera.bf"];
//---------------- Camera frame rate --------------------------------------
float fps = fSettings["Camera.fps"];
if(fps==0)
fps=30;// fall back to 30 fps when the setting is absent or zero
// Max/Min Frames to insert keyframes and to check relocalisation
// Keyframe-insertion spacing in frames. NOTE(review): fps is a float;
// assigning it to mMaxFrames implicitly narrows if mMaxFrames is int — confirm.
mMinFrames = 0;
mMaxFrames = fps;
// 【3】------------------ Echo the camera configuration -------------------
cout << endl << "相机参数 Camera Parameters: " << endl;
cout << "-- fx: " << fx << endl;
cout << "-- fy: " << fy << endl;
cout << "-- cx: " << cx << endl;
cout << "-- cy: " << cy << endl;
cout << "-- k1: " << DistCoef.at<float>(0) << endl;
cout << "-- k2: " << DistCoef.at<float>(1) << endl;
if(DistCoef.rows==5)
cout << "-- k3: " << DistCoef.at<float>(4) << endl;
cout << "-- p1: " << DistCoef.at<float>(2) << endl;
cout << "-- p2: " << DistCoef.at<float>(3) << endl;
cout << "-- fps: " << fps << endl;
// Color channel order of the input images: 1 = RGB, 0 = BGR.
int nRGB = fSettings["Camera.RGB"];
mbRGB = nRGB;
if(mbRGB)
cout << "-- 彩色图通道顺序color order: RGB (ignored if grayscale)" << endl;
else
cout << "-- 彩色图通道顺序 color order: BGR (ignored if grayscale)" << endl;
//【4】----------- Load ORB feature-extraction parameters ------------------
// Total number of features to extract per frame.
int nFeatures = fSettings["ORBextractor.nFeatures"];
// Scale ratio between consecutive image-pyramid levels (e.g. 1.2).
float fScaleFactor = fSettings["ORBextractor.scaleFactor"];
// Number of pyramid levels (e.g. 8).
int nLevels = fSettings["ORBextractor.nLevels"];
// Default FAST corner-detection threshold (e.g. 20).
int fIniThFAST = fSettings["ORBextractor.iniThFAST"];
// Fallback (lower) FAST threshold used when the default yields too few corners (e.g. 8).
int fMinThFAST = fSettings["ORBextractor.minThFAST"];
3.创建 ORB特征提取对象
// Create the ORB extractor for the left/monocular image.
mpORBextractorLeft = new ORBextractor(nFeatures,fScaleFactor,nLevels,fIniThFAST,fMinThFAST);
// The ORBextractor constructor (shown below) takes:
//   total feature count, pyramid scale factor, number of pyramid levels,
//   and the high/low FAST thresholds.
// The low threshold exists so that when the default (high) FAST threshold
// detects too few corners, extraction can retry with the lower threshold
// to keep the feature count up.
// ORBextractor constructor.
//   _nfeatures   - total number of features to extract per image
//   _scaleFactor - scale ratio between consecutive pyramid levels
//   _nlevels     - number of pyramid levels
//   _iniThFAST   - default FAST corner threshold
//   _minThFAST   - fallback FAST threshold, so enough corners are still found
//                  when the default threshold yields too few
ORBextractor::ORBextractor(int _nfeatures, float _scaleFactor, int _nlevels,
int _iniThFAST, int _minThFAST):
nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels),
iniThFAST(_iniThFAST), minThFAST(_minThFAST)
{
// Per-level scale factors and their squares.
mvScaleFactor.resize(nlevels);
mvLevelSigma2.resize(nlevels);
// Level 0 is the original image: scale 1.
mvScaleFactor[0]=1.0f;
mvLevelSigma2[0]=1.0f;
// 【1】Precompute each level's scale factor and its square:
// 1, s, s^2, ... with s = scaleFactor (e.g. 1.2).
for(int i=1; i<nlevels; i++)
{
mvScaleFactor[i]=mvScaleFactor[i-1]*scaleFactor;// geometric progression of scales
mvLevelSigma2[i]=mvScaleFactor[i]*mvScaleFactor[i];// squared scale
}
// 【2】Precompute the inverses of both tables for later use.
mvInvScaleFactor.resize(nlevels);
mvInvLevelSigma2.resize(nlevels);
for(int i=0; i<nlevels; i++)
{
mvInvScaleFactor[i]=1.0f/mvScaleFactor[i];// inverse scale factor
mvInvLevelSigma2[i]=1.0f/mvLevelSigma2[i];// inverse squared scale factor
}
// 【3】Allocate the pyramid containers and distribute the nfeatures budget
// across levels: smaller (blurrier) levels get fewer features.
mvImagePyramid.resize(nlevels);
mnFeaturesPerLevel.resize(nlevels);// per-level feature quota
float factor = 1.0f / scaleFactor;
// Distribute as a geometric series with ratio q = 1/scaleFactor:
//   sum = a1 * (1 - q^nlevels) / (1 - q) = nfeatures
//   => first term a1 = nfeatures * (1 - q) / (1 - q^nlevels)
float nDesiredFeaturesPerScale = nfeatures*(1 - factor)/(1 - (float)pow((double)factor, (double)nlevels));
// nDesiredFeaturesPerScale starts as the level-0 quota (first term of the series).
int sumFeatures = 0;
for( int level = 0; level < nlevels-1; level++ )
{
mnFeaturesPerLevel[level] = cvRound(nDesiredFeaturesPerScale);
// running total over the first nlevels-1 levels
sumFeatures += mnFeaturesPerLevel[level];
nDesiredFeaturesPerScale *= factor;
}
// The last level takes whatever remains of the budget (clamped to >= 0,
// since per-level rounding may overshoot nfeatures).
mnFeaturesPerLevel[nlevels-1] = std::max(nfeatures - sumFeatures, 0);
// 【4】Initialization used for orientation and descriptor computation.
const int npoints = 512;
// Copy the trained sampling pattern (bit_pattern_31_) into `pattern`.
const Point* pattern0 = (const Point*)bit_pattern_31_;
std::copy(pattern0, pattern0 + npoints, std::back_inserter(pattern));
//This is for orientation
// pre-compute the end of a row in a circular patch
// umax[v] = largest u such that (u, v) still lies inside the circular patch.
umax.resize(HALF_PATCH_SIZE + 1);
// The v range is split into two parts so that the resulting circle is
// exactly symmetric in x and y (needed for a stable orientation measure).
// floor of R/sqrt(2) + 1
int v, v0, vmax = cvFloor(HALF_PATCH_SIZE * sqrt(2.f) / 2 + 1);
// ceil of R/sqrt(2)
int vmin = cvCeil(HALF_PATCH_SIZE * sqrt(2.f) / 2);
const double hp2 = HALF_PATCH_SIZE*HALF_PATCH_SIZE;
for (v = 0; v <= vmax; ++v)
// Pythagoras, rounded: u = round(sqrt(R^2 - v^2)).
umax[v] = cvRound(sqrt(hp2 - v * v));
// Make sure we are symmetric
// Fill the remaining rows by mirroring across the diagonal, guaranteeing
// the discretized patch boundary is a symmetric circle.
for (v = HALF_PATCH_SIZE, v0 = 0; v >= vmin; --v)
{
while (umax[v0] == umax[v0 + 1])
++v0;
umax[v] = v0;
++v0;
}
}
4.输出特征提取的参数
// Echo the ORB-extractor configuration that was just read.
cout << endl << "ORB特征提取参数 ORB Extractor Parameters: " << endl;
cout << "-- 每幅图像特征点数量 Number of Features: " << nFeatures << endl;
cout << "-- 金字塔层数Scale Levels: " << nLevels << endl;
cout << "-- 金字塔尺度Scale Factor: " << fScaleFactor << endl;
cout << "-- 初始快速角点法阈值 Initial Fast Threshold: " << fIniThFAST << endl;
cout << "-- 最小阈值 Minimum Fast Threshold: " << fMinThFAST << endl;
5.判断3D点远近的深度阈值
if(sensor==System::STEREO || sensor==System::RGBD)
{
// Depth threshold separating "close" from "far" 3D points:
// mbf * ThDepth / fx, with ThDepth read from the settings file.
mThDepth = mbf*(float)fSettings["ThDepth"]/fx;// depth threshold
cout << endl << "深度图阈值 Depth Threshold (Close/Far Points): " << mThDepth << endl;
}
6.深度相机深度的比例因子
if(sensor==System::RGBD)
{
// Scale factor for converting raw RGB-D depth-map values to metric depth.
mDepthMapFactor = fSettings["DepthMapFactor"];// depth-map factor from settings
if(fabs(mDepthMapFactor)<1e-5)
mDepthMapFactor=1;// guard against an unset/zero factor (avoid division by zero)
else
// Store the reciprocal — presumably multiplied against raw depth values
// at the usage site; confirm there.
mDepthMapFactor = 1.0f/mDepthMapFactor;
}