OpenCV: computing the rotation matrix R and translation matrix T

Problem: given the camera intrinsic matrix K and two images of the same scene taken from different angles, solve for the camera's rotation matrix R and translation T.

Steps:

1. Extract feature points from the two images (SURF is used here).

2. Match the feature points to obtain the correspondences.

3. Convert the matched KeyPoints to Mat and compute the fundamental matrix F.

4. From F, compute the essential matrix E using E = K'^T * F * K (both images come from the same camera here, so K' = K).

5. Decompose E with SVD to obtain R and T (see the math sketch after the theory link below).

Theory: see the reference article http://blog.csdn.net/xiao4399/article/details/48037287
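For reference, a sketch of the standard result behind step 5 (Hartley & Zisserman, ch. 9), with u3 denoting the third column of U:

```latex
E = U\,\mathrm{diag}(1,1,0)\,V^{\top},\qquad
W = \begin{pmatrix} 0 & -1 & 0 \\ 1 & 0 & 0 \\ 0 & 0 & 1 \end{pmatrix}

R \in \{\,U W V^{\top},\ U W^{\top} V^{\top}\,\},\qquad
t = \pm u_3
```

Only one of the four (R, t) combinations places the triangulated points in front of both cameras, so a cheirality check is still required; the code below simply takes R = U*W*Vt and t = u3.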


#include <iostream>
#include<opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>
using namespace std;
using namespace cv;


int main(int argc, char *argv[])
{
        // load the two images taken from different viewpoints
        Mat img1 = imread("WIN_20161202_09_04_28_Pro.jpg");
        Mat img2 = imread("WIN_20161202_09_04_47_Pro.jpg");

        if (!img1.data || !img2.data)
            return -1;

        //step1: Detect the keypoints using SURF Detector
        int minHessian = 400;

        SurfFeatureDetector detector(minHessian);

        vector<KeyPoint> keypoints1, keypoints2;

        detector.detect(img1, keypoints1);
        detector.detect(img2, keypoints2);

        //step2: Calculate descriptors (feature vectors)
        SurfDescriptorExtractor extractor;
        Mat descriptors1, descriptors2;
        extractor.compute(img1, keypoints1, descriptors1);
        extractor.compute(img2, keypoints2, descriptors2);

        //step3: Match descriptor vectors with a brute-force matcher
        //(crossCheck = true keeps only mutual nearest-neighbour matches)
        BFMatcher matcher(NORM_L2, true);
        vector<DMatch> matches;
        matcher.match(descriptors1, descriptors2,matches);

        //Draw matches
        Mat imgMatches;
        drawMatches(img1, keypoints1, img2, keypoints2, matches, imgMatches);

        resize(imgMatches,imgMatches,Size(192*5,108*5));
      //  namedWindow("Matches");
      //  imshow("brute force Matches", imgMatches);


        int ptcount=(int) matches.size();
        Mat p1(ptcount,2,CV_32F);
        Mat p2(ptcount,2,CV_32F);

        // copy the matched keypoint coordinates into two Nx2 float matrices
        Point2f pt;
        for(int i=0;i<ptcount;i++)
        {
            pt=keypoints1[matches[i].queryIdx].pt;
            p1.at<float>(i,0)=pt.x;
            p1.at<float>(i,1)=pt.y;

            pt=keypoints2[matches[i].trainIdx].pt;
            p2.at<float>(i,0)=pt.x;
            p2.at<float>(i,1)=pt.y;
        }

        //use RANSAC to calculate F
        Mat fundamental;
        vector <uchar> RANSACStatus;
        fundamental=findFundamentalMat(p1,p2,RANSACStatus,FM_RANSAC);
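        // Note: with FM_RANSAC, RANSACStatus is filled with 1 for inliers and 0 for
        // outliers (by default roughly a 3-pixel epipolar threshold and 0.99
        // confidence). For a cleaner E it can help to re-estimate F from the
        // inliers only; the commented-out block at the end of main() shows how to
        // collect them.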

        cout<<"F="<<fundamental<<endl;

        // intrinsics from a prior camera calibration
        double fx, fy, cx, cy;
        fx = 700.388086;
        fy = 700.784113;
        cx = 353.260055;
        cy = 223.483445;

        // intrinsic (calibration) matrix K
        Mat K= cv::Mat::eye(3,3,CV_64FC1);
        K.at<double>(0,0) = fx;
        K.at<double>(1,1) = fy;
        K.at<double>(0,2) = cx;
        K.at<double>(1,2) = cy;

         cout<<"K="<<K<<endl;

        // transpose of K
        Mat Kt = K.t();

        cout << "Kt=" << Kt << endl;

        // E = K'^T * F * K  (same camera for both views, so K' = K)
        Mat E = Kt * fundamental * K;
        cout<<"E="<<E<<endl;
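        // Note: a true essential matrix has singular values (s, s, 0). Because F is
        // estimated from noisy matches, E = Kt * F * K will not satisfy this exactly;
        // a common fix (sketch, optional) is to re-project E before decomposing it:
        //   SVD svd0(E);
        //   Mat D = Mat::diag((Mat_<double>(3,1) << 1, 1, 0));
        //   E = svd0.u * D * svd0.vt;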

        SVD svd(E);

        // W is the 90-degree rotation matrix used in the standard decomposition
        // E = U * W * Vt (Hartley & Zisserman):
        //   W = [ 0 -1  0
        //         1  0  0
        //         0  0  1 ]
        // Building it from Mat::eye would leave ones at (0,0) and (1,1), which is
        // wrong, so start from zeros.
        Mat W = Mat::zeros(3, 3, CV_64FC1);
        W.at<double>(0,1) = -1;
        W.at<double>(1,0) = 1;
        W.at<double>(2,2) = 1;

        // This picks one of the four possible solutions; the others use
        // R = U * W.t() * Vt and t = -u3. The physically correct pair is the one
        // that puts the triangulated points in front of both cameras.
        Mat_<double> R = svd.u * W * svd.vt;
        Mat_<double> t = svd.u.col(2);
        cout<<"R="<<R<<endl;
        cout<<"t="<<t<<endl;

        // The commented-out block below draws the feature matches after RANSAC outlier rejection
 /*
        //calculate the number of outliner
        int outlinerCount=0;
        for(int i=0;i<ptcount;i++)
        {
            if(RANSACStatus[i]==0)
                outlinerCount++;
        }

        //calculate inLiner
        vector<Point2f> inliner1,inliner2;
        vector<DMatch> inlierMatches;
        int inlinerCount=ptcount-outlinerCount;
        inliner1.resize(inlinerCount);
        inliner2.resize(inlinerCount);
        inlierMatches .resize(inlinerCount);

        int inlinerMatchesCount=0;
        for(int i=0;i<ptcount;i++)
        {
            if(RANSACStatus[i]!=0)
            {
                inliner1[inlinerMatchesCount].x=p1.at<float>(i,0);
                inliner1[inlinerMatchesCount].y=p1.at<float>(i,1);
                inliner2[inlinerMatchesCount].x=p2.at<float>(i,0);
                inliner2[inlinerMatchesCount].y=p2.at<float>(i,1);
                inlierMatches[inlinerMatchesCount].queryIdx=inlinerMatchesCount;
                inlierMatches[inlinerMatchesCount].trainIdx=inlinerMatchesCount;
                inlinerMatchesCount++;
            }
        }

        vector<KeyPoint> key1(inlinerMatchesCount);
        vector<KeyPoint> key2(inlinerMatchesCount);
        KeyPoint::convert(inliner1,key1);
        KeyPoint::convert(inliner2,key2);

        Mat out;
        drawMatches(img1,key1,img2,key2,inlierMatches,out);
        resize(out,out,Size(192*5,108*5));
        imshow("good match result",out);
*/
        waitKey();
        return 0;
}
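
For newer OpenCV versions (3.x and later, where the SURF classes live in opencv_contrib), steps 3-5 can be done in one shot with findEssentialMat() plus recoverPose(), which estimates E directly from the correspondences and resolves the four-fold (R, t) ambiguity with a cheirality check. Below is a minimal sketch under that assumption; the helper name estimatePose and its arguments are illustrative only:

```cpp
#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

// pts1/pts2: matched pixel coordinates from the two images (same ordering),
// K: 3x3 camera intrinsic matrix. R and t receive the relative pose of the
// second view with respect to the first; t is recovered only up to scale.
static bool estimatePose(const vector<Point2f>& pts1, const vector<Point2f>& pts2,
                         const Mat& K, Mat& R, Mat& t)
{
    Mat mask;  // inlier mask filled by RANSAC
    Mat E = findEssentialMat(pts1, pts2, K, RANSAC, 0.999, 1.0, mask);
    if (E.empty())
        return false;

    // recoverPose triangulates the inliers and keeps the (R, t) pair that puts
    // the points in front of both cameras (the cheirality check).
    int nInliers = recoverPose(E, pts1, pts2, K, R, t, mask);
    cout << "inliers used for pose: " << nInliers << endl;
    return nInliers > 0;
}
```

As with the SVD route above, the translation has no absolute scale; recovering metric scale needs extra information such as a known baseline or object size.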





To obtain a drone's rotation matrix and translation vector, camera pose estimation is required. Once the camera's intrinsic and extrinsic parameters are available, the camera pose can be obtained by solving for its rotation matrix and translation vector. The steps are:

1. Read a drone image and correct lens distortion with cv2.undistort().
2. Use OpenCV's solvePnP() to solve for the camera's rotation matrix and translation vector.

A simple example:

```python
import cv2
import numpy as np

# Read a drone image
img = cv2.imread('drone.jpg')

# Camera intrinsic matrix, distortion coefficients, and known extrinsics
K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32)
dist_coef = np.array([k1, k2, p1, p2, k3], dtype=np.float32)
R = np.array([[r11, r12, r13], [r21, r22, r23], [r31, r32, r33]], dtype=np.float32)
t = np.array([tx, ty, tz], dtype=np.float32)

# Correct the image distortion
img = cv2.undistort(img, K, dist_coef)

# Points on the calibration board and their corresponding image points
obj_points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float32)
img_points = np.array([[318, 380], [309, 264], [444, 375], [442, 268]], dtype=np.float32)

# Solve for the camera's rotation and translation
success, rvec, tvec = cv2.solvePnP(obj_points, img_points, K, dist_coef)

# Convert the rotation vector into a rotation matrix
R, _ = cv2.Rodrigues(rvec)

print("Rotation matrix:\n", R)
print("Translation vector:\n", tvec)
```

This example assumes the camera intrinsic matrix K, the distortion coefficients dist_coef, the extrinsic matrix R and translation vector t, and a drone image are already available. The image is first undistorted with cv2.undistort() using K and dist_coef. Then, given the calibration-board points obj_points and the corresponding image points img_points, cv2.solvePnP() solves for the camera's rotation and translation. Finally, converting the rotation vector rvec to a rotation matrix with cv2.Rodrigues() yields the drone's rotation matrix and translation vector.