SVO的初始化

VO的初始化,一般是通过匹配特征点之间的几何约束恢复位姿。下面是一些常用的opencv相关接口,代码未经过测试,仅作参考。

利用LK光流和本质矩阵恢复的初始化:

// Store an incoming frame and detect features on it.
// The first frame becomes the reference frame; every later frame becomes the
// current frame. Fixes two defects in the original:
//  - the first-frame branch fell through, so the reference frame was also
//    processed as the current frame;
//  - cv::Mat assignment shares the pixel buffer, so a caller reusing the
//    buffer for the next capture would silently overwrite frame_ref_.
void Initialization::addFrame(cv::Mat& frame)
{
    if(is_first_frame_){
        frame_ref_ = frame.clone();   // deep copy: keep the reference frame intact
        detectFeatures(frame_ref_,p2d_ref_);
        is_first_frame_ = false;
        return;                       // first frame is the reference only
    }
    frame_cur_ = frame.clone();
    detectFeatures(frame_cur_,p2d_cur_);
    return;
}

// Detect FAST corners in `frame` and return their pixel locations in `p2d`.
// Any previous contents of `p2d` are discarded.
// Bug fix: the original called p2d.resize(kpts.size()) and then emplace_back,
// which left kpts.size() default-constructed (0,0) points at the front and
// doubled the vector's size. Use clear() + reserve() instead.
void Initialization::detectFeatures(cv::Mat& frame,vector<cv::Point2f>& p2d)
{
    // threshold = 10, non-max suppression enabled
    auto detector = cv::FastFeatureDetector::create(10,true);
    vector<cv::KeyPoint> kpts;
    detector->detect(frame,kpts);
    p2d.clear();
    p2d.reserve(kpts.size());
    for(const auto& kpt : kpts) p2d.emplace_back(kpt.pt);
    return;
}


// Track `p2d_ref` from frame_ref into frame_cur with pyramidal LK optical
// flow, then drop the point pairs the tracker lost so that p2d_ref and
// p2d_cur stay aligned index-by-index.
// Bug fix: the original erased from both vectors while advancing iterators
// saved before the loop; vector::erase invalidates every iterator at or after
// the erase point, so the subsequent ++/erase was undefined behavior (and the
// iterator/index pairing drifted after the first erase). Compact in place
// instead — O(n) and no iterator invalidation issues.
void Initialization::trackFeatures(cv::Mat& frame_ref, cv::Mat& frame_cur,
    vector<cv::Point2f>& p2d_ref, vector<cv::Point2f>& p2d_cur)
{
    const int win_size = 10;
    vector<uchar> features_found;          // per-point tracking status (1 = tracked)
    cv::calcOpticalFlowPyrLK(frame_ref,frame_cur,p2d_ref,p2d_cur,features_found,
        cv::noArray(), cv::Size(win_size*2+1,win_size*2+1),3,cv::TermCriteria(
            cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS,20,0.3));
    // Keep only successfully tracked pairs, preserving order.
    size_t keep = 0;
    for(size_t i = 0; i < features_found.size(); ++i){
        if(features_found[i]){
            p2d_ref[keep] = p2d_ref[i];
            p2d_cur[keep] = p2d_cur[i];
            ++keep;
        }
    }
    p2d_ref.resize(keep);
    p2d_cur.resize(keep);
    return;
}

// Recover the relative pose between the reference and current frames:
// estimate the essential matrix from the matched point sets with RANSAC,
// then decompose it into rotation and translation (cheirality-checked by
// cv::recoverPose). Results are computed locally, mirroring the original.
void Initialization::recoverRT(vector<cv::Point2f>& p2d_ref, vector<cv::Point2f>& 
    p2d_cur,cv::Mat& cameraMatrix)
{
    cv::Mat essential = cv::findEssentialMat(p2d_ref, p2d_cur, cameraMatrix,
        cv::RANSAC, /*prob=*/0.999, /*threshold=*/1.0, cv::noArray());
    cv::Mat rotation;
    cv::Mat translation;
    cv::recoverPose(essential, p2d_ref, p2d_cur, cameraMatrix,
        rotation, translation, cv::noArray());
}

除了使用光流法,另一个常用的特征是ORB特征:
这里需要注意的是ORB描述子是二进制字符串而非浮点向量,因此匹配时需要使用汉明距离(Hamming distance)。

// Detect ORB keypoints in `frame`, returning their pixel locations in `p2d`
// and their binary descriptors (one row per keypoint) in `desc`.
// Bug fix: the original called p2d.resize(kpts.size()) and then emplace_back,
// which left kpts.size() default-constructed (0,0) points at the front and
// doubled the vector's size — misaligning points and descriptor rows.
void Initialization::detectORBFeatures(cv::Mat& frame, vector<cv::Point2f>& p2d, 
    cv::Mat& desc)
{
    auto orb = ORB::create();
    vector<cv::KeyPoint> kpts;
    orb->detectAndCompute(frame, cv::noArray(), kpts, desc);
    p2d.clear();
    p2d.reserve(kpts.size());
    for(const auto& kpt : kpts) p2d.emplace_back(kpt.pt);
    return;
}

// Match ORB descriptors between the reference and current frames and reduce
// both point vectors to the matched, index-aligned pairs.
// ORB descriptors are binary, so Hamming distance is required.
// Bug fixes vs. the original:
//  - it matched desc_cur against itself; the query must be desc_ref and the
//    train set desc_cur, so queryIdx indexes p2d_ref and trainIdx p2d_cur;
//  - the current points were read from p2d_ref instead of p2d_cur;
//  - resize()+push_back prepended matches.size() default-constructed points.
void Initialization::trackORBFeatures(vector<cv::Point2f>& p2d_ref, 
    vector<cv::Point2f>& p2d_cur, cv::Mat& desc_ref, cv::Mat& desc_cur)
{
    vector<DMatch> matches;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    matcher->match(desc_ref, desc_cur, matches);
    vector<cv::Point2f> p2d_ref_temp, p2d_cur_temp;
    p2d_ref_temp.reserve(matches.size());
    p2d_cur_temp.reserve(matches.size());
    for(const auto& m : matches)
    {
        p2d_ref_temp.push_back(p2d_ref[m.queryIdx]);   // query = reference frame
        p2d_cur_temp.push_back(p2d_cur[m.trainIdx]);   // train = current frame
    }
    p2d_ref = p2d_ref_temp;
    p2d_cur = p2d_cur_temp;
    return;
}

另外,当特征点都在同一平面上时,还可以使用单应性矩阵,但是恢复得到的是多个R,t。

// Recover candidate poses from a planar scene: undistort the point sets,
// fit a homography, and decompose it into up to four {R, t, n} solutions
// (disambiguation is left to the caller, as in the original).
// Bug fix: the original passed cv::noArray() as the P argument of
// cv::undistortPoints, which outputs NORMALIZED image coordinates, yet then
// decomposed the homography with the pixel-space cameraMatrix — the two are
// inconsistent. Passing cameraMatrix as P reprojects the undistorted points
// back to pixel coordinates, so decomposing with cameraMatrix is correct.
void Initialization::recoverFromHomography(vector<cv::Point2f>& p2d_ref, vector<cv::Point2f>& 
    p2d_cur,cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
    cv::Mat H;
    vector<cv::Point2f> p2d_ref_undistort, p2d_cur_undistort;
    // R = noArray (no rectification), P = cameraMatrix (stay in pixel coords)
    cv::undistortPoints(p2d_ref,p2d_ref_undistort, cameraMatrix, distCoeffs,
        cv::noArray(), cameraMatrix);
    cv::undistortPoints(p2d_cur,p2d_cur_undistort, cameraMatrix, distCoeffs,
        cv::noArray(), cameraMatrix);

    H = findHomography(p2d_ref_undistort, p2d_cur_undistort);
    vector<cv::Mat> rotations,translations,normals;
    cv::decomposeHomographyMat(H,cameraMatrix,rotations,translations,normals);
    return;	
}
©️2020 CSDN 皮肤主题: 技术黑板 设计师:CSDN官方博客 返回首页