Recently, while working on parking-slot line detection for an automatic parking project, I needed to convert image pixel coordinates into real-world coordinates. This conversion can be solved from the relationship between world coordinates and image pixel coordinates, which I explained in detail in an earlier post; if you are not familiar with it, see: 相机标定:从世界坐标系到图像像素坐标系转换过程解析
The derivation is as follows:
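In brief, with K the camera intrinsic matrix, R and t the extrinsics recovered by PnP, and (u, v) a pixel, the standard pinhole model gives the two relations the code below implements:

\[
s \begin{bmatrix} u \\ v \\ 1 \end{bmatrix} = K \left( R X_w + t \right)
\quad\Longrightarrow\quad
X_w = R^{-1} \left( s K^{-1} \begin{bmatrix} u \\ v \\ 1 \end{bmatrix} - t \right)
\]

Since all target points lie on a known plane \(Z_w = z_{const}\) (here the ground, \(z_{const} = 0\)), the scale factor \(s\) follows from the Z row of the right-hand equation:

\[
s = \frac{z_{const} + \left[ R^{-1} t \right]_Z}{\left[ R^{-1} K^{-1} (u, v, 1)^T \right]_Z}
\]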
Normally the camera intrinsics are calibrated in advance; for the calibration procedure, see this post: 张正友相机标定Opencv实现以及标定流程&&标定结果评价&&图像矫正流程解析(附标定程序和棋盘图)
Next, we solve the PnP problem from camera pose estimation: given several feature points with known world coordinates and their projections in the camera image, we recover the rotation matrix R and translation vector T of the camera frame relative to the world frame. This can be done directly with cv::solvePnP; for a detailed explanation, see this post: 相机位姿估计
The calibration procedure is as follows:
1. Take the projection of the vehicle-body center onto the ground as the origin of the world coordinate system, with the z-axis pointing up (perpendicular to the ground), the x-axis pointing toward the front of the vehicle, and the y-axis chosen to satisfy the right-hand rule.
2. Parking-line detection uses only the right-side camera. Place calibration markers on level ground, making sure the points are not collinear, and record their world coordinates (at least 4 points are needed) together with their pixel coordinates in the image.
3. Call cv::solvePnP to solve for the rotation matrix R and translation vector T of the camera frame relative to the world frame.
4. Using the formulas derived above, solve for the real-world position of the parking lines found in the image.
Below is C++/OpenCV code implementing this process:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
int main()
{
// First, use the calibration points' pixel coordinates and their corresponding world coordinates to solve for the camera's R & T via PnP //
Point2f point;
vector<Point2f> boxPoints; // pixel coordinates of the calibration points
// Loading image
Mat sourceImage = imread("2.bmp");
namedWindow("Source", 1);
// Setting box corners in image
//one Point
point = Point2f((float) 558, (float) 259); //640X480
boxPoints.push_back(point);
circle(sourceImage, boxPoints[0], 3, Scalar(0, 255, 0), -1, 8);
//two Point
point = Point2f((float) 629, (float) 212); //640X480
boxPoints.push_back(point);
circle(sourceImage, boxPoints[1], 3, Scalar(0, 255, 0), -1, 8);
//three Point
point = Point2f((float) 744, (float) 260); //640X480
boxPoints.push_back(point);
circle(sourceImage, boxPoints[2], 3, Scalar(0, 255, 0), -1, 8);
//four Point
point = Point2f((float) 693, (float) 209); //640X480
boxPoints.push_back(point);
circle(sourceImage, boxPoints[3], 3, Scalar(0, 255, 0), -1, 8);
//Setting box corners in real world
vector<Point3f> worldBoxPoints; // world coordinates of the calibration points
Point3f tmpPoint;
tmpPoint = Point3f((float) 2750, (float) 890, (float) 0);
worldBoxPoints.push_back(tmpPoint);
tmpPoint = Point3f((float) 3500, (float) 450, (float) 0);
worldBoxPoints.push_back(tmpPoint);
tmpPoint = Point3f((float) 2790, (float) -240, (float) 0);
worldBoxPoints.push_back(tmpPoint);
tmpPoint = Point3f((float) 3620, (float) -50, (float) 0);
worldBoxPoints.push_back(tmpPoint);
//camera intrinsics///
cv::Mat cameraMatrix1=Mat::eye(3, 3, cv::DataType<double>::type); // camera intrinsic matrix
cv::Mat distCoeffs1(5, 1, cv::DataType<double>::type); // distortion coefficients
distCoeffs1.at<double>(0,0) = 0.061439051;
distCoeffs1.at<double>(1,0) = 0.03187556;
distCoeffs1.at<double>(2,0) = -0.00726151;
distCoeffs1.at<double>(3,0) = -0.00111799;
distCoeffs1.at<double>(4,0) = -0.00678974;
//Taken from Mastering OpenCV
double fx = 328.61652824;
double fy = 328.56512516;
double cx = 629.80551148;
double cy = 340.5442837;
cameraMatrix1.at<double>(0, 0) = fx;
cameraMatrix1.at<double>(1, 1) = fy;
cameraMatrix1.at<double>(0, 2) = cx;
cameraMatrix1.at<double>(1, 2) = cy;
//PnP solve R&T///
cv::Mat rvec1(3, 1, cv::DataType<double>::type); // rotation vector
cv::Mat tvec1(3, 1, cv::DataType<double>::type); // translation vector
cv::solvePnP(worldBoxPoints, boxPoints, cameraMatrix1, distCoeffs1, rvec1, tvec1, false, cv::SOLVEPNP_ITERATIVE);
cv::Mat rvecM1(3, 3, cv::DataType<double>::type); // rotation matrix
cv::Rodrigues(rvec1, rvecM1);
//Compute the camera's rotation (Euler) angles in the world frame; not required for the 2D-3D conversion itself
const double PI=3.1415926;
double thetaZ=atan2(rvecM1.at<double>(1,0),rvecM1.at<double>(0,0))/PI*180;
double thetaY=atan2(-1*rvecM1.at<double>(2,0),sqrt(rvecM1.at<double>(2,1)*rvecM1.at<double>(2,1)
+rvecM1.at<double>(2,2)*rvecM1.at<double>(2,2)))/PI*180;
double thetaX=atan2(rvecM1.at<double>(2,1),rvecM1.at<double>(2,2))/PI*180;
cout<<"theta x "<<thetaX<<endl<<"theta Y: "<<thetaY<<endl<<"theta Z: "<<thetaZ<<endl;
///Compute Zc (i.e., the scale factor s) from the formula above
cv::Mat imagePoint = cv::Mat::ones(3, 1, cv::DataType<double>::type);
cv::Mat tempMat, tempMat2;
//Given any 2D pixel, the corresponding s can be computed
imagePoint.at<double>(0,0)=558;
imagePoint.at<double>(1,0)=259;
double zConst = 0; // Z value of the target plane in world coordinates (points lie on the ground)
//compute the parameter s
double s;
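// From  s*(u,v,1)^T = K*(R*Xw + t)  we get  Xw = R^{-1}*(s*K^{-1}*(u,v,1)^T - t).
// Taking the Z row and requiring Xw.z = zConst gives
//   s = (zConst + (R^{-1}*t).z) / ((R^{-1}*K^{-1}*(u,v,1)^T).z)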
tempMat = rvecM1.inv() * cameraMatrix1.inv() * imagePoint;
tempMat2 = rvecM1.inv() * tvec1;
s = zConst + tempMat2.at<double>(2, 0);
s /= tempMat.at<double>(2, 0);
cout<<"s : "<<s<<endl;
///3D to 2D
cv::Mat worldPoints=Mat::ones(4,1,cv::DataType<double>::type); // homogeneous world point (X, Y, Z, 1)
worldPoints.at<double>(0,0)=3620;
worldPoints.at<double>(1,0)=-590;
worldPoints.at<double>(2,0)=0;
cout<<"world Points : "<<worldPoints<<endl;
Mat image_points=Mat::ones(3,1,cv::DataType<double>::type);
Mat RT_;
hconcat(rvecM1,tvec1,RT_); // build the 3x4 extrinsic matrix [R|t]
cout<<"RT_"<<RT_<<endl;
image_points=cameraMatrix1*RT_*worldPoints;
Mat D_Points=Mat::ones(3,1,cv::DataType<double>::type);
D_Points.at<double>(0,0)=image_points.at<double>(0,0)/image_points.at<double>(2,0);
D_Points.at<double>(1,0)=image_points.at<double>(1,0)/image_points.at<double>(2,0);
//Alternatively, cv::projectPoints(objectPoints, rvec1, tvec1, cameraMatrix1, distCoeffs1, imagePoints) would also apply lens distortion (objectPoints as vector<Point3f>)
cout<<"3D to 2D: "<<D_Points<<endl;
//camera position in world coordinates: C = -R^{-1} * t
Mat camera_coordinates=-rvecM1.inv()*tvec1;
///2D to 3D///
cv::Mat imagePoint_you_know = cv::Mat::ones(3, 1, cv::DataType<double>::type); // (u, v, 1)
imagePoint_you_know.at<double>(0, 0) = 558;
imagePoint_you_know.at<double>(1, 0) = 259;
Mat wcPoint = rvecM1.inv() * (s * cameraMatrix1.inv() * imagePoint_you_know - tvec1);
Point3f worldPoint(wcPoint.at<double>(0, 0), wcPoint.at<double>(1, 0), wcPoint.at<double>(2, 0));
cout <<"2D to 3D :"<< worldPoint << endl;
imshow("Source",sourceImage);
waitKey(0);
return 0;
}
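As a sanity check, the pixel (558, 259) fed into the 2D-to-3D step is the first calibration point, so the printed world coordinate should land close to (2750, 890, 0), up to the PnP reprojection error.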
Once these parameters have been solved for, the conversion from image coordinates to world coordinates can be wrapped in a function. Note that the scale factor s depends on the input pixel, so it must be recomputed for each point; the version below does this, assuming the globals rotationMatrix, cameraMatrix, tvec and zConst hold the values computed above (rvecM1, cameraMatrix1, tvec1 and zConst in the listing):
Point3f getWorldPoints(Point2f inPoints)
{
    cv::Mat imagePoint = cv::Mat::ones(3, 1, cv::DataType<double>::type); // (u, v, 1)
    imagePoint.at<double>(0, 0) = inPoints.x;
    imagePoint.at<double>(1, 0) = inPoints.y;
    // s depends on the pixel, so recompute it for this point (targets lie on Z = zConst)
    cv::Mat tempMat = rotationMatrix.inv() * cameraMatrix.inv() * imagePoint;
    cv::Mat tempMat2 = rotationMatrix.inv() * tvec;
    double s = (zConst + tempMat2.at<double>(2, 0)) / tempMat.at<double>(2, 0);
    Mat wcPoint = rotationMatrix.inv() * (s * cameraMatrix.inv() * imagePoint - tvec);
    Point3f worldPoint(wcPoint.at<double>(0, 0), wcPoint.at<double>(1, 0), wcPoint.at<double>(2, 0));
    return worldPoint;
}
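One caveat: cameraMatrix.inv() assumes ideal (undistorted) pixel coordinates, while coordinates measured on the raw image are distorted, so for best accuracy the pixel should be undistorted first. Below is a minimal usage sketch, continuing the listing above; it assumes a global distCoeffs mirroring distCoeffs1 from main (names hypothetical):

// Hypothetical helper, assuming the globals cameraMatrix and distCoeffs
// mirror cameraMatrix1 / distCoeffs1 from the listing above.
Point2f undistortPixel(Point2f p)
{
    std::vector<cv::Point2f> in{ p }, out;
    // Passing cameraMatrix as P makes undistortPoints return pixel coordinates again
    cv::undistortPoints(in, out, cameraMatrix, distCoeffs, cv::noArray(), cameraMatrix);
    return out[0];
}

void demo()
{
    // Map a detected parking-line pixel to ground-plane world coordinates
    Point3f w = getWorldPoints(undistortPixel(Point2f(558.f, 259.f)));
    cout << "world: " << w << endl; // should come out near (2750, 890, 0)
}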