网上的PnP平差没有数据,很痛苦,自己制作数据来解
根据齐次坐标的推导公式
在Matlab中制作数据(矩阵运算比较方便)
% Generate synthetic data for a PnP (Perspective-n-Point) adjustment:
% 15 random 3D points are projected through a known camera (intrinsics K,
% rotation R, and P4, the first three rows of the fourth column of the
% projection matrix), then both point sets are written out as
% ready-to-paste C++ push_back statements.
clc
clear
rng(0);  % fix the RNG seed so the generated dataset is reproducible
X = zeros(15, 3);  % 3D scene points, one per row
x = zeros(15, 2);  % matching 2D image points, one per row
K=[25 0 0;0 25 0;0 0 1;]%camera intrinsic matrix (fx = fy = 25, principal point at origin)
R=[0.4770710827172032, -0.7476726229304006, -0.4619402893831741;%rotation matrix
0.653281340399395, 0.653280157390419, -0.3826859368279936;
0.5879002230999792, -0.1192085772095462, 0.8001016452918156];
%P4 is the first three rows of the fourth column of the projection matrix P
P4=[31.36648617639075;
-17.35639716512237;
-40.18578173498011];
%(The original computed an unused T = inv(R)*P4 here; it was removed because
% it did not match the projection model actually used in the loop below.)
%Verify the formula by plugging in one hand-picked point:
%A=K*(R*([50, 50, 75]')+P4);
%A=A/A(3)
for i = 1:15
% sample a random 3D point: x in [99,101], y in [-1,1], z in [2,4]
px = (101-99)*rand() + 99;
py = 2*rand() - 1;
pz = 2*rand() + 2;
X(i,1)=px;
X(i,2)=py;
X(i,3)=pz;
% project: m = K*R*(P - P4); the pixel is (m(1)/m(3), m(2)/m(3))
m = K*R* ([px;py;pz]-P4);
u = m(1) / m(3);
v = m(2) / m(3);
x(i,1) = u;
x(i,2) = v;
end
% fprintf consumes its matrix argument column-wise, so BOTH point lists must
% be transposed to emit one point per printed line.  (The original used
% XT = X untransposed, which interleaved the coordinates of different points
% in the generated corners3D lines.)
XT=X';
xt=x';
%One way to save the data in .txt format:
% dlmwrite('myFile.txt',X,'delimiter',',')
% type('myFile.txt')
% dlmwrite('myFile2.txt',x,'delimiter',',')
% type('myFile2.txt')
fileID1=fopen('X.txt','w');
fprintf(fileID1,'corners3D.push_back(Point3f(%f, %f, %f));\n',XT);
fclose(fileID1);
type('X.txt')
fileID2=fopen('x2.txt','w');
fprintf(fileID2,'corners2D.push_back(Point2f(%f, %f));\n',xt);
fclose(fileID2);
type('x2.txt')
在c++中利用ceres_solver解决PnP问题
输入了8组3D、2D点
用到了Eigen、Opencv、ceres三个库
#include <Eigen/Core>
#include <ceres/ceres.h>
#include <ceres/rotation.h>
#include <opencv2/opencv.hpp>
#include <ceres/rotation.h>
using namespace cv;
using namespace Eigen;
using namespace std;
// Cost functor for the PnP problem: given one fixed 3D scene point and its
// observed 2D projection, computes the 2-D reprojection residual for a
// 6-DoF camera pose.  Pose layout: camera[0..2] = angle-axis rotation,
// camera[3..5] = translation.  Intrinsics (fx, fy, cx, cy) are fixed.
struct PnPCeres {
    PnPCeres(double fx, double fy, double cx, double cy,
             const cv::Point2f &image_point, const cv::Point3f &scene_point)
        : fx_(fx), fy_(fy), cx_(cx), cy_(cy), uv(image_point), xyz(scene_point) {}

    // Residual = observed pixel - projection of the scene point under `camera`.
    template <typename T>
    bool operator()(const T *const camera, // pose: 3 angle-axis + 3 translation
                    T *residual) const {
        // Scene point, lifted to the autodiff scalar type.
        T sp[3];
        sp[0] = T(xyz.x);
        sp[1] = T(xyz.y);
        sp[2] = T(xyz.z);
        // Rotate into the camera frame; camera[0..2] can be passed directly
        // (no need for the original's copy into a temporary array).
        T p[3];
        ceres::AngleAxisRotatePoint(camera, sp, p);
        // camera[3,4,5] is the translation.
        p[0] += camera[3];
        p[1] += camera[4];
        p[2] += camera[5];
        // Perspective projection.  The division is by the CAMERA-frame depth
        // p[2], so that is what the degeneracy guard must test — the original
        // checked the world-space xyz.z, which does not protect against a
        // near-zero p[2].  ceres::abs is Jet-aware, so this works under
        // autodiff as well as with plain doubles.
        T xp, yp;
        if (ceres::abs(p[2]) < T(1e-2)) {
            // Degenerate depth: orthographic-style fallback keeps the
            // residual finite (same fallback form as the original).
            xp = T(fx_) * p[0] + T(cx_);
            yp = T(fy_) * p[1] + T(cy_);
        } else {
            xp = T(fx_) * p[0] / p[2] + T(cx_);
            yp = T(fy_) * p[1] / p[2] + T(cy_);
        }
        // The error is the difference between observed and predicted position.
        // (The original also printed a newline to stdout on every evaluation,
        // including autodiff passes; that debug output has been removed.)
        residual[0] = T(uv.x) - xp;
        residual[1] = T(uv.y) - yp;
        return true;
    }

    // Factory: auto-differentiated cost function with 2 residuals and one
    // 6-dimensional pose parameter block.  Ownership of the returned cost
    // function passes to ceres::Problem::AddResidualBlock.
    static ceres::CostFunction *Create(double fx, double fy, double cx, double cy,
                                       const cv::Point2f &image_point,
                                       const cv::Point3f &scene_point) {
        return new ceres::AutoDiffCostFunction<PnPCeres, 2, 6>(
            new PnPCeres(fx, fy, cx, cy, image_point, scene_point));
    }

    double fx_, fy_, cx_, cy_;  // camera intrinsics
    const Point2f uv;           // observed 2D image point
    const Point3f xyz;          // known 3D scene point
};
int main() {
Matrix3d R1;
//以下为输出旋转矩阵R对应特征向量
/*
R1<<0.4770710827172032, -0.7476726229304006, -0.4619402893831741,0.653281340399395, 0.653280157390419, -0.3826859368279936;0.5879002230999792, -0.1192085772095462, 0.8001016452918156;
Vector3d rr = rodrigues(R1);
std::cout <<"rodrigues(R1)"<< rr << std::endl;*/
std::vector<Point2f> corners2D;
std::vector<Point3f> corners3D;
{
corners3D.push_back(Point3f(50, 50, 75));
corners3D.push_back(Point3f(70, 80, 75));
corners3D.push_back(Point3f(50, 80, 65));
corners3D.push_back(Point3f(30, 45, 120));
corners3D.push_back(Point3f(15, 40, 100));
corners3D.push_back(Point3f(95, 30, 110));
corners3D.push_back(Point3f(110, 36, 67));
corners3D.push_back(Point3f(-50, -35, 150));
corners2D.push_back(Point2f(-9.71482, 11.1372));
corners2D.push_back(Point2f(-14.4338, 25.2411));
corners2D.push_back(Point2f(-27.3207, 33.6937));
corners2D.push_back(Point2f(-15.9325, -5.24333));
corners2D.push_back(Point2f(-21.4124, -11.2221));
corners2D.push_back(Point2f(0.860326, 5.54655));
corners2D.push_back(Point2f(8.8004, 17.7452));
corners2D.push_back(Point2f(-16.3028, -59.6481));
}
double fx = 25;
double fy = fx;
double cx = 0;
double cy = 0;
ceres::Problem problem;
double camera[6] = {0.16, -0.6, 0.8, 31, -17, -40};
std::cout << std::endl << "Inital pose: ";
std::cout << camera[0] << ", " << camera[1] << ", " << camera[2] << ", ";
std::cout << camera[3] << ", " << camera[4] << ", " << camera[5] << std::endl;
std::cout << "corners2D.size()" << corners2D.size() << std::endl;
for (int i = 0; i < corners2D.size(); ++i) {
std::cout << i << std::endl;
std::cout << corners2D[i].x << " " << corners2D[i].y << std::endl;
std::cout << corners3D[i].x << " " << corners3D[i].y << " " << corners3D[i].z << std::endl;
ceres::CostFunction *cost_function = PnPCeres::Create(fx, fy, cx, cy, corners2D[i], corners3D[i]);
problem.AddResidualBlock(cost_function, NULL /* squared loss */, camera);
}
ceres::Solver::Options options;
options.linear_solver_type = ceres::DENSE_SCHUR;
options.minimizer_progress_to_stdout = true;
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
std::cout << summary.FullReport() << "\n";
std::cout << camera[0] << ", " << camera[1] << ", " << camera[2] << ", ";
std::cout << camera[3] << ", " << camera[4] << ", " << camera[5] << std::endl;
return 0;
}
不错的参考资料
https://blog.csdn.net/qq_15642411/article/details/84652889
手写高斯牛顿法解PnP
《视觉SLAM十四讲》有手写的高斯-牛顿法估计PnP