[Learning SLAM] Visual SLAM Notes: The Frame Class


The Frame Class

  1. [Data members] ID, timestamp, pose, camera, and images

In Frame we define the ID, timestamp, pose, camera, and the color/depth images:
public:// data members
 typedef std::shared_ptr<Frame> Ptr;
 unsigned long id_; // id of this frame
 double time_stamp_; // when it is recorded
 SE3 T_c_w_; // transform from world to camera
 Camera::Ptr camera_; // Pinhole RGBD Camera model 
 Mat color_, depth_; // color and depth image 


Frame( long id, double time_stamp=0, SE3 T_c_w=SE3(), Camera::Ptr camera=nullptr, Mat color=Mat(), Mat depth=Mat() );
  2. [Member functions] Create a Frame, look up a point's depth, get the optical center, test whether a point is in view

The member functions include: creating a Frame, finding the depth for a given keypoint, getting the camera's optical center, checking whether a point lies inside the field of view, and so on. A sketch of typical implementations follows the declarations below.
 // factory function
 static Frame::Ptr createFrame(); 
 
 // find the depth in depth map
 double findDepth( const cv::KeyPoint& kp );
 
 // Get Camera Center
 Vector3d getCamCenter() const;
 
 // check if a point is in this frame 
 bool isInFrame( const Vector3d& pt_world );
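
A minimal sketch of how these functions can be implemented, in the style of the slambook project this note follows. The bodies below are reconstructed for illustration: they assume Sophus' SE3 for T_c_w_, Eigen's Vector2d/Vector3d, and the project's Camera class with world2camera/world2pixel helpers and a depth_scale_ member, so details may differ from the actual source.

 // Factory function: hands out monotonically increasing frame ids
 Frame::Ptr Frame::createFrame()
 {
     static long factory_id = 0;
     return Frame::Ptr( new Frame(factory_id++) );
 }

 // Read the raw depth at the keypoint and convert it to meters
 double Frame::findDepth( const cv::KeyPoint& kp )
 {
     int x = cvRound(kp.pt.x);
     int y = cvRound(kp.pt.y);
     ushort d = depth_.ptr<ushort>(y)[x];
     if ( d != 0 )
         return double(d)/camera_->depth_scale_;
     return -1.0; // no valid depth measurement
 }

 // Camera center in the world frame: the translation part of T_w_c
 Vector3d Frame::getCamCenter() const
 {
     return T_c_w_.inverse().translation();
 }

 // A world point is "in frame" if it has positive depth and projects
 // inside the image bounds
 bool Frame::isInFrame( const Vector3d& pt_world )
 {
     Vector3d p_cam = camera_->world2camera( pt_world, T_c_w_ );
     if ( p_cam(2,0) < 0 )
         return false;
     Vector2d pixel = camera_->world2pixel( pt_world, T_c_w_ );
     return pixel(0,0) > 0 && pixel(1,0) > 0
         && pixel(0,0) < color_.cols
         && pixel(1,0) < color_.rows;
 }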

 

In-view check: Tracking.cpp calls mCurrentFrame.isInFrustum(pMP,0.5) around lines 120-140:

// Project (this fills MapPoint variables for matching)
// Step 2.1: check whether each point in LocalMapPoints is inside the field of view
if(mCurrentFrame.isInFrustum(pMP,0.5))
{
    // increment the number of frames observing this point; the MapPoint is
    // inside the field of view of some frames
    pMP->IncreaseVisible();
    // only MapPoints inside the field of view take part in the later projection matching
    nToMatch++;
}

frame.cpp: the 0.5 threshold passed above is cos(60°), i.e. cos(60/180*pi) = 0.5, so a point is rejected when its viewing direction deviates from the MapPoint's mean viewing direction by more than 60 degrees.

/**
 * @brief Check whether a point is inside the field of view
 *
 * Computes the reprojected coordinates, the viewing-direction angle, and the
 * predicted scale in the current frame
 * @param  pMP             MapPoint
 * @param  viewingCosLimit cosine threshold between the viewing direction and the mean viewing direction
 * @return                 true if it is in view
 * @see SearchLocalPoints()
 */
bool Frame::isInFrustum(MapPoint *pMP, float viewingCosLimit)
{
    pMP->mbTrackInView = false;

    // 3D in absolute coordinates
    cv::Mat P = pMP->GetWorldPos();

    // 3D in camera coordinates
    // coordinates of the 3D point P in the camera frame
    const cv::Mat Pc = mRcw*P+mtcw; // this R and t have already been roughly optimized
    const float &PcX = Pc.at<float>(0);
    const float &PcY = Pc.at<float>(1);
    const float &PcZ = Pc.at<float>(2);

    // Check positive depth
    if(PcZ<0.0f)
        return false;

    // Project in image and check it is not outside
    // V-D 1) project the MapPoint into the current frame and check whether it falls inside the image
    const float invz = 1.0f/PcZ;
    const float u=fx*PcX*invz+cx;
    const float v=fy*PcY*invz+cy;
    if(u<mnMinX || u>mnMaxX)
        return false;
    if(v<mnMinY || v>mnMaxY)
        return false;

    // Check distance is in the scale invariance region of the MapPoint
    // V-D 3) compute the distance from the MapPoint to the camera center and check that it lies inside the scale-invariance range
    const float maxDistance = pMP->GetMaxDistanceInvariance();
    const float minDistance = pMP->GetMinDistanceInvariance();
    // vector from the camera to the 3D point in world coordinates, pointing from the camera towards the point
    const cv::Mat PO = P-mOw;
    const float dist = cv::norm(PO);
    if(dist<minDistance || dist>maxDistance)
        return false;

    // Check viewing angle
    // V-D 2) compute the cosine of the angle between the current and the mean viewing direction; return false if it is below cos(60), i.e. the angle exceeds 60 degrees
    cv::Mat Pn = pMP->GetNormal();
    const float viewCos = PO.dot(Pn)/dist;
    if(viewCos<viewingCosLimit)
        return false;

    // Predict scale in the image
    // V-D 4) predict the scale from the distance (i.e. which pyramid level the corresponding feature point sits in)
    const int nPredictedLevel = pMP->PredictScale(dist,this);

    // Data used by the tracking
    // mark this point to be projected later
    pMP->mbTrackInView = true;
    pMP->mTrackProjX = u;
    pMP->mTrackProjXR = u - mbf*invz; // horizontal coordinate of the 3D point projected into the right stereo image
    pMP->mTrackProjY = v;
    pMP->mnTrackScaleLevel = nPredictedLevel;
    pMP->mTrackViewCos = viewCos;

    return true;
}
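
The PredictScale call in step V-D 4) turns the point's distance into a pyramid level: mfMaxDistance corresponds to seeing the feature at level 0, and the closer the point gets, the higher the level at which its feature should appear. A free-function sketch of the idea; the member version in MapPoint.cc also takes a lock, and the names here are illustrative:

 #include <algorithm>
 #include <cmath>

 // Sketch of pMP->PredictScale(dist, this): predict the pyramid level
 // from the current distance, clamped to the valid level range.
 int PredictScaleSketch(float currentDist, float maxDistance,
                        float logScaleFactor, int nScaleLevels)
 {
     float ratio = maxDistance / currentDist;   // > 1 when closer than max
     int nScale = static_cast<int>(std::ceil(std::log(ratio) / logScaleFactor));
     return std::max(0, std::min(nScale, nScaleLevels - 1));
 }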

For comparison, a looser 70-degree limit would correspond to cos(70/180*pi) = 0.3420.
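
A quick numeric check of both thresholds (self-contained C++):

 #include <cmath>
 #include <cstdio>

 int main()
 {
     // viewingCosLimit = 0.5 keeps the angle between the viewing ray and the
     // mean viewing direction below 60 degrees; 0.342 would allow up to 70.
     std::printf("cos(60 deg) = %.4f\n", std::cos(60.0/180.0*M_PI)); // 0.5000
     std::printf("cos(70 deg) = %.4f\n", std::cos(70.0/180.0*M_PI)); // 0.3420
     return 0;
 }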

Monocular undistortion, frame.cpp

// Undistort points
// reshape mat to 2 channels; the row/column shape of the matrix is unchanged
mat=mat.reshape(2);
cv::undistortPoints(mat,mat,mK,mDistCoef,cv::Mat(),mK); // OpenCV distortion correction
mat=mat.reshape(1);
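
For context, a self-contained sketch of the surrounding UndistortKeyPoints logic, reconstructed along the lines of Frame.cc (the free-function form and parameter names are for illustration):

 #include <opencv2/opencv.hpp>
 #include <vector>

 // Pack keypoint coordinates into an Nx2 float Mat, undistort them, and
 // write the corrected coordinates back.
 void UndistortKeyPointsSketch(const std::vector<cv::KeyPoint>& mvKeys,
                               const cv::Mat& mK, const cv::Mat& mDistCoef,
                               std::vector<cv::KeyPoint>& mvKeysUn)
 {
     if(mDistCoef.at<float>(0)==0.0f) // no distortion: nothing to do
     {
         mvKeysUn = mvKeys;
         return;
     }
     const int N = mvKeys.size();
     cv::Mat mat(N,2,CV_32F);
     for(int i=0; i<N; i++)
     {
         mat.at<float>(i,0)=mvKeys[i].pt.x;
         mat.at<float>(i,1)=mvKeys[i].pt.y;
     }
     mat=mat.reshape(2); // view as Nx1, 2-channel points
     // Passing mK again as the new projection matrix P keeps the output in
     // pixel coordinates rather than normalized coordinates.
     cv::undistortPoints(mat,mat,mK,mDistCoef,cv::Mat(),mK);
     mat=mat.reshape(1);
     mvKeysUn.resize(N);
     for(int i=0; i<N; i++)
     {
         cv::KeyPoint kp = mvKeys[i];
         kp.pt.x=mat.at<float>(i,0);
         kp.pt.y=mat.at<float>(i,1);
         mvKeysUn[i]=kp;
     }
 }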

The grid: is its unit pixels? frame.h (answered in the sketch after the header below)

/**
* This file is part of ORB-SLAM2.
*
* Copyright (C) 2014-2016 Raúl Mur-Artal <raulmur at unizar dot es> (University of Zaragoza)
* For more information see <https://github.com/raulmur/ORB_SLAM2>
*
* ORB-SLAM2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef FRAME_H
#define FRAME_H

#include<vector>

#include "MapPoint.h"
#include "Thirdparty/DBoW2/DBoW2/BowVector.h"
#include "Thirdparty/DBoW2/DBoW2/FeatureVector.h"
#include "ORBVocabulary.h"
#include "KeyFrame.h"
#include "ORBextractor.h"

#include <opencv2/opencv.hpp>

namespace ORB_SLAM2
{
#define FRAME_GRID_ROWS 48  // is the unit pixels?? (no: the number of grid rows, i.e. cells)
#define FRAME_GRID_COLS 64  // related to the orb_extractor 31*31 grid?? (no: a separate grid; the number of columns)

class MapPoint;
class KeyFrame;

class Frame
{
public:
    Frame();

    // Copy constructor.
    Frame(const Frame &frame);

    // Constructor for stereo cameras.
    Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth);

    // Constructor for RGB-D cameras.
    Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth);

    // Constructor for Monocular cameras: builds the frame from the raw image, the camera parameters, and the timestamp.
    Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth);

    // Extract ORB on the image. 0 for left image and 1 for right image.
    // The extracted keypoints are stored in mvKeys and mDescriptors
    // ORB features are detected by calling orbExtractor directly on the image
    void ExtractORB(int flag, const cv::Mat &im);

    // Compute Bag of Words representation.
    // the result is stored in mBowVec
    void ComputeBoW();

    // Set the camera pose.
    // updates mTcw from Tcw
    void SetPose(cv::Mat Tcw);

    // Computes rotation, translation and camera center matrices from the camera pose.
    void UpdatePoseMatrices();

    // Returns the camera center.
    inline cv::Mat GetCameraCenter()
	{
        return mOw.clone();
    }

    // Returns the inverse of the rotation (mRwc)
    inline cv::Mat GetRotationInverse()
	{
        return mRwc.clone();
    }

    // Check if a MapPoint is in the frustum of the camera
    // and fill variables of the MapPoint to be used by the tracking
    // i.e. whether the landmark is inside the field of view
    bool isInFrustum(MapPoint* pMP, float viewingCosLimit);

    // Compute the cell of a keypoint (return false if outside the grid)
    bool PosInGrid(const cv::KeyPoint &kp, int &posX, int &posY);

    vector<size_t> GetFeaturesInArea(const float &x, const float  &y, const float  &r, const int minLevel=-1, const int maxLevel=-1) const;

    // Search a match for each keypoint in the left image to a keypoint in the right image.
    // If there is a match, depth is computed and the right coordinate associated to the left keypoint is stored.
    void ComputeStereoMatches();

    // Associate a "right" coordinate to a keypoint if there is valid depth in the depthmap.
    void ComputeStereoFromRGBD(const cv::Mat &imDepth);

    // Backprojects a keypoint (if stereo/depth info available) into 3D world coordinates.
    cv::Mat UnprojectStereo(const int &i);

public:
    // Vocabulary used for relocalization.
    ORBVocabulary* mpORBvocabulary;

    // Feature extractor. The right is used only in the stereo case.
    ORBextractor* mpORBextractorLeft, *mpORBextractorRight;

    // Frame timestamp.
    double mTimeStamp;

    // Calibration matrix and OpenCV distortion parameters.
    cv::Mat mK; // camera intrinsic matrix (pinhole model)
    static float fx;
    static float fy;
    static float cx;
    static float cy;
    static float invfx;
    static float invfy;
    cv::Mat mDistCoef;

    // Stereo baseline multiplied by fx.
    float mbf;

    // Stereo baseline in meters.
    float mb;

    // Threshold close/far points. Close points are inserted from 1 view.
    // Far points are inserted as in the monocular case from 2 views.
    float mThDepth;

    // Number of KeyPoints.
    int N; // number of keypoints in this frame

    // Vector of keypoints (original for visualization) and undistorted (actually used by the system).
    // In the stereo case, mvKeysUn is redundant as images must be rectified.
    // In the RGB-D case, RGB images can be distorted.
    // mvKeys: keypoints extracted from the original left image (not undistorted)
    // mvKeysRight: keypoints extracted from the original right image (not undistorted)
    // mvKeysUn: mvKeys after undistortion; stereo images are usually already
    // rectified, so undistorting them again would be redundant
    std::vector<cv::KeyPoint> mvKeys, mvKeysRight; // keypoints of this frame
    std::vector<cv::KeyPoint> mvKeysUn;

    // Corresponding stereo coordinate and depth for each keypoint.
    // "Monocular" keypoints have a negative value.
    // For stereo, mvuRight stores, for each left-image keypoint, the horizontal
    // coordinate of its match in the right image
    // mvDepth stores the corresponding depth
    // for a monocular camera both containers hold -1
    std::vector<float> mvuRight;
    std::vector<float> mvDepth;

    // Bag of Words Vector structures.
    DBoW2::BowVector mBowVec;
    DBoW2::FeatureVector mFeatVec;

    // ORB descriptor, each row associated to a keypoint.
    // descriptors of the left and right camera keypoints
    cv::Mat mDescriptors, mDescriptorsRight;

    // MapPoints associated to keypoints, NULL pointer if no association.
    // the MapPoint associated with each keypoint
    std::vector<MapPoint*> mvpMapPoints;

    // Flag to identify outlier associations.
    // set for keypoints whose associated 3D map point cannot actually be observed
    std::vector<bool> mvbOutlier;

    // Keypoints are assigned to cells in a grid to reduce matching complexity when projecting MapPoints.
    // multiplying a coordinate by mfGridElementWidthInv / mfGridElementHeightInv gives its grid cell
    static float mfGridElementWidthInv;
    static float mfGridElementHeightInv;
    // keypoints assigned to each grid cell; the image is split into a grid so that extracted features are spread evenly (FRAME_GRID_ROWS 48, FRAME_GRID_COLS 64)
    std::vector<std::size_t> mGrid[FRAME_GRID_COLS][FRAME_GRID_ROWS];

    // Camera pose.
    cv::Mat mTcw; ///< camera pose: transformation from the world frame to the camera frame

    // Current and Next Frame id.
    static long unsigned int nNextId; ///< Next Frame id; counter used to assign each frame a unique id
    long unsigned int mnId; ///< Current Frame id

    // Reference Keyframe.
    KeyFrame* mpReferenceKF; // pointer to the reference keyframe

    // Scale pyramid info.
    int mnScaleLevels; // number of levels in the image pyramid
    float mfScaleFactor; // scale factor between pyramid levels
    float mfLogScaleFactor; // log of the scale factor
    vector<float> mvScaleFactors;
    vector<float> mvInvScaleFactors;
    vector<float> mvLevelSigma2;
    vector<float> mvInvLevelSigma2;

    // Undistorted Image Bounds (computed once).
    // used to determine the bounds when laying out the grid
    static float mnMinX;
    static float mnMaxX;
    static float mnMinY;
    static float mnMaxY;

    static bool mbInitialComputations;


private:

    // Undistort keypoints given OpenCV distortion parameters.
    // Only for the RGB-D case. Stereo must be already rectified!
    // (called in the constructor).
    void UndistortKeyPoints();
    // Computes image bounds for the undistorted image (called in the constructor).
    // Undistortion shifts the image borders, so the bounds are computed once for the frame up front.
    void ComputeImageBounds(const cv::Mat &imLeft);

    // Assign keypoints to the grid for speed up feature matching (called in the constructor).
    void AssignFeaturesToGrid();

    // Rotation, translation and camera center
    cv::Mat mRcw; ///< Rotation from world to camera
    cv::Mat mtcw; ///< Translation from world to camera
    cv::Mat mRwc; ///< Rotation from camera to world
    cv::Mat mOw;  ///< mtwc, translation from camera to world (the camera center in world coordinates)
};

}// namespace ORB_SLAM

#endif // FRAME_H
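
To answer the grid question above: FRAME_GRID_ROWS and FRAME_GRID_COLS are not pixels; they are the number of cells the undistorted image is divided into, so each cell is (mnMaxX-mnMinX)/FRAME_GRID_COLS pixels wide and (mnMaxY-mnMinY)/FRAME_GRID_ROWS pixels tall. A sketch of PosInGrid consistent with the members declared above (the body is reconstructed from the ORB-SLAM2 sources; treat the details as an assumption):

 // Map a keypoint to its grid cell. mfGridElementWidthInv is
 // FRAME_GRID_COLS/(mnMaxX-mnMinX), so multiplying the x offset by it
 // yields the column index directly (and likewise for rows).
 bool Frame::PosInGrid(const cv::KeyPoint &kp, int &posX, int &posY)
 {
     posX = round((kp.pt.x-mnMinX)*mfGridElementWidthInv);
     posY = round((kp.pt.y-mnMinY)*mfGridElementHeightInv);

     // Undistortion can move keypoints slightly outside the computed
     // bounds, so reject cells that fall outside the grid.
     if(posX<0 || posX>=FRAME_GRID_COLS || posY<0 || posY>=FRAME_GRID_ROWS)
         return false;
     return true;
 }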

——————————————————————

Part the clouds to see the blue sky; wait for the clouds to clear to see the bright moon.
