光流法计算PNP位姿

LK光流特征追踪计算PNP位姿

角点提取及光流追踪

        // --- Corner extraction and LK optical-flow tracking ---
        std::vector<cv::Point2f> prepoint, nextpoint;//corners in the previous frame / their tracked positions in the current frame
        std::vector<float> err;//per-point tracking error reported by calcOpticalFlowPyrLK
        std::vector<uchar> state;//tracking status flag: 1 = tracked successfully, 0 = lost

        cv::goodFeaturesToTrack(imlast, prepoint, 1000, 0.01, 8, cv::Mat(), 3, true, 0.04);//detect up to 1000 Harris corners (min distance 8 px) in the previous frame, stored in prepoint
        cv::cornerSubPix(imlast, prepoint, cv::Size(10, 10), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));//refine the corners to sub-pixel accuracy
        cv::calcOpticalFlowPyrLK(imlast, imRGB, prepoint, nextpoint, state, err, cv::Size(22, 22), 5, 		cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.01));//pyramidal LK: track prepoint from the previous frame into the current one; results land in nextpoint
		/*** Keep only the corners that were tracked successfully */

        std::vector<cv::Point2f> match_pre,match_cur;//matched corner pairs (previous frame / current frame)

        int limit_edge_corner=5;//border margin in pixels
        int sta_label=0;
        for(int i=0;i<state.size();i++)
        {
            int x1 = prepoint[i].x, y1 = prepoint[i].y;
            int x2 = nextpoint[i].x, y2 = nextpoint[i].y;
            if ((x1 < limit_edge_corner || x1 >= imRGB.cols - limit_edge_corner || x2 < limit_edge_corner || x2 >= imRGB.cols - limit_edge_corner||
                 y1 < limit_edge_corner || y1 >= imRGB.rows - limit_edge_corner || y2 < limit_edge_corner || y2 >= imRGB.rows - limit_edge_corner))
                 //discard points lying within limit_edge_corner px of the image border in either frame
                 //(the original comment claimed the opposite — the test rejects near-border points)
            {
                state[i] = 0;
                continue;
            }
            sta_label=(int)segMask.ptr<uchar>((int)nextpoint[i].y)[(int)nextpoint[i].x];//instance-segmentation label at the tracked point; nonzero means it falls on a prior dynamic-object mask
            //if(state[i]&&sta_label==0) {
            if(state[i]) {//tracking succeeded: keep the pair (note: the dynamic-mask filter above is currently disabled, so sta_label is unused)
                match_pre.push_back(prepoint[i]);
                match_cur.push_back(nextpoint[i]);
            }
        }

将提取的角点由2D坐标恢复为3D坐标

        /// PnP input construction: back-project previous-frame matches to 3D world points.
        int N=match_cur.size();
        // construct input
        std::vector<cv::Point2f> cur_2d(N);   // current-frame 2D pixel coordinates
        std::vector<cv::Point3f> pre_3d(N);   // world-frame 3D points lifted from the previous frame (comment fixed: these are 3D world points, not 2D pixels)
        std::vector<cv::Point3f> cur_3d(N);   // world-frame 3D points (reserved; not filled in this section)

        for(int i=0;i<N;i++) {
            // NOTE(review): Mat::at takes int indices, so the float corner coords
            // are truncated toward zero here — consider cvRound for nearest-pixel
            // depth lookup.  TODO confirm intended behavior.
            float z1 = imDlast.at<float>(match_pre[i].y, match_pre[i].x);  // depth at the previous-frame corner
            float z2 = imdepth.at<float>(match_cur[i].y, match_cur[i].x);  // depth at the current-frame corner
            if (z1 > 0 && z2 > 0) {  // keep only pairs with a valid (positive) depth in both frames
                // Back-project the previous-frame pixel into that camera's frame:
                // x = (u - cx) * z / fx,  y = (v - cy) * z / fy.
                float x = (match_pre[i].x - mCurrentFrame.cx) * z1 * mCurrentFrame.invfx;
                float y = (match_pre[i].y - mCurrentFrame.cy) * z1 * mCurrentFrame.invfy;
                cv::Mat x3Dp = (cv::Mat_<float>(3, 1) << x, y, z1);
                // Camera -> world: Xw = Rwc * Xc + Ow, where Ow = -Rlw^T * tlw is
                // the previous camera center in world coordinates.  (The original
                // recomputed Rlw/Rwl/tlw/twl from mLastFrame.mTcw on every
                // iteration without ever using them — that dead, loop-invariant
                // work has been removed; mRwc/mOw already hold the same result.)
                x3Dp = mLastFrame.mRwc * x3Dp + mLastFrame.mOw;
                pre_3d[i] = cv::Point3f(x3Dp.at<float>(0), x3Dp.at<float>(1), x3Dp.at<float>(2));

                cur_2d[i].x = match_cur[i].x;
                cur_2d[i].y = match_cur[i].y;
            }
            // Pairs with invalid depth keep their default values
            // ((0,0,0) -> (0,0)); downstream code filters them by z > 0.
        }

根据匹配好的3D点和2D点进行PNP位姿求解

     // output
            cv::Mat Rvec(3, 1, CV_64FC1);  // rotation vector (axis-angle, Rodrigues form)
            cv::Mat Tvec(3, 1, CV_64FC1);  // translation vector
            cv::Mat d(3, 3, CV_64FC1);     // rotation matrix (filled by cv::Rodrigues below)
            cv::Mat inliers;               // inlier indices reported by RANSAC
        // RANSAC refinement parameters
            int iter_num = 500;            // maximum RANSAC iterations
            double reprojectionError = 0.4, confidence = 0.98; // inlier reprojection threshold (px) and RANSAC confidence
            // BUG FIX: camera_mat was previously created uninitialized and only 5
            // of its 9 entries were assigned, leaving the off-diagonal entries as
            // garbage memory that was fed straight into solvePnPRansac.  Start
            // from the identity so the skew terms are 0 and (2,2) is 1.
            cv::Mat camera_mat = cv::Mat::eye(3, 3, CV_64FC1);   // pinhole intrinsics K
            cv::Mat distCoeffs = cv::Mat::zeros(1, 4, CV_64FC1); // input assumed already undistorted
            camera_mat.at<double>(0, 0) = mCurrentFrame.mK.at<float>(0,0); // fx
            camera_mat.at<double>(1, 1) = mCurrentFrame.mK.at<float>(1,1); // fy
            camera_mat.at<double>(0, 2) = mCurrentFrame.mK.at<float>(0,2); // cx
            camera_mat.at<double>(1, 2) = mCurrentFrame.mK.at<float>(1,2); // cy

            // 3D world points (previous frame, lifted) vs 2D pixels (current
            // frame): estimate Tcw of the current frame.
            cv::solvePnPRansac(pre_3d, cur_2d, camera_mat, distCoeffs, Rvec, Tvec, false,
                               iter_num, reprojectionError, confidence, inliers, cv::SOLVEPNP_P3P); // alternatives: AP3P EPNP P3P ITERATIVE DLS
            cv::Rodrigues(Rvec, d);  // convert the rotation vector to a rotation matrix

重投影误差计算

            cv::Mat Mod = cv::Mat::eye(4,4,CV_32F);//初始估计模型 欧式变换矩阵T 包含旋转矩阵和平移向量
            // assign the result to current pose
            Mod.at<float>(0,0) = d.at<double>(0,0); Mod.at<float>(0,1) = d.at<double>(0,1); Mod.at<float>(0,2) = d.at<double>(0,2); Mod.at<float>(0,3) = Tvec.at<double>(0,0);
            Mod.at<float>(1,0) = d.at<double>(1,0); Mod.at<float>(1,1) = d.at<double>(1,1); Mod.at<float>(1,2) = d.at<double>(1,2); Mod.at<float>(1,3) = Tvec.at<double>(1,0);
            Mod.at<float>(2,0) = d.at<double>(2,0); Mod.at<float>(2,1) = d.at<double>(2,1); Mod.at<float>(2,2) = d.at<double>(2,2); Mod.at<float>(2,3) = Tvec.at<double>(2,0);

            // calculate the re-projection error
            std::vector<int> MM_inlier;//自己手动计算的内点数
            cv::Mat MotionModel;//ORB中的匀速运动模型
            if (mVelocity.empty())
                MotionModel = cv::Mat::eye(4,4,CV_32F)*mLastFrame.mTcw;
            else
                MotionModel = mVelocity*mLastFrame.mTcw;

            for (int i = 0; i < pre_3d.size(); ++i) {
                if (pre_3d[i].z > 0&&(cur_2d[i].x!=0&&cur_2d[i].y!=0)) {//遍历每个匹配的有效的3D-2D点 深度大于0 且 当前像素点不为0
                    const cv::Mat x3D = (cv::Mat_<float>(3, 1) << pre_3d[i].x, pre_3d[i].y, pre_3d[i].z);//point3f转换为cv::Mat
                    const cv::Mat x3D_c =MotionModel.rowRange(0, 3).colRange(0, 3) * x3D + MotionModel.rowRange(0, 3).col(3);
                    //R*x+t 从世界坐标系转换为当前帧的相机坐标系
                    cout<<x3D_c<<endl;
                    cout<<"u="<<cur_2d[i].x<<"v="<<cur_2d[i].y<<endl;
                    if(x3D_c.at<float>(2)<0)
                        continue;

                    const float xc = x3D_c.at<float>(0);
                    const float yc = x3D_c.at<float>(1);
                    const float invzc = 1.0 / x3D_c.at<float>(2);

                    const float u = mCurrentFrame.fx * xc * invzc + mCurrentFrame.cx;//相机坐标系转化为图像像素坐标
                    const float v = mCurrentFrame.fy * yc * invzc + mCurrentFrame.cy;
                    const float u_ = cur_2d[i].x - u;
                    const float v_ = cur_2d[i].y - v;
                    const float Rpe = std::sqrt(u_ * u_ + v_ * v_);

                    mCurrentFrame.mvMatch.push_back(cur_2d[i]);
                    mCurrentFrame.mvRpe.push_back(Rpe);

                    if (Rpe < reprojectionError) {
                        MM_inlier.push_back(i);
                    }
                }
            }
            cv::Mat output;
            cout<<"PNPinliers:"<<inliers.rows<<endl<<"MM_inliers:"<<MM_inlier.size()<<endl;

调试输出是
PNPinliers:221
MM_inliers:5
利用cv::projectPoints函数计算重投影误差也得不到对应的模型内点数目,平均重投影误差大小在5左右。(提示:手动检验中投影用的是匀速运动模型位姿MotionModel,而不是PnP求解出的位姿Mod,两者衡量的并不是同一个位姿。)
也就是PNP内部计算的重投影误差小于0.4的点是221个
我用PNP位姿手动计算重投影误差小于0.4的点是5个
问题不知道出在哪里

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 3
    评论
评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值