VIO-单目BA求解
深蓝学院vio课程的学习记录,仅个人备忘。
代码部分
int main() {
// Prepare simulated data: camera poses and 3D landmarks in the world frame.
vector<Frame> cameras;
vector<Eigen::Vector3d> points;
GetSimDataInWordFrame(cameras, points);// cameras holds each frame's R, t plus its observations
Eigen::Quaterniond qic(1, 0, 0, 0); // camera-to-IMU rotation (identity in this simulation)
Eigen::Vector3d tic(0, 0, 0); // camera-to-IMU translation (zero in this simulation)
Frame代码部分(保存每帧的姿态与观测):
struct Frame {
Frame(Eigen::Matrix3d R, Eigen::Vector3d t) : Rwc(R), qwc(R), twc(t) {};
Eigen::Matrix3d Rwc; //旋转阵
Eigen::Quaterniond qwc;//四元数
Eigen::Vector3d twc; //平移向量
unordered_map<int, Eigen::Vector3d> featurePerId; // 该帧观测到的特征以及特征id
};
GetSimDataInWordFrame()代码部分用来产生世界坐标系下的虚拟数据: 相机姿态, 特征点, 以及每帧观测
// Generate simulated data in the world frame: camera poses along a circular arc,
// random 3D landmarks, and a noisy observation of every landmark in every frame.
// The default-seeded engine makes the data set deterministic; note that the exact
// order of RNG draws defines the generated values.
void GetSimDataInWordFrame(vector<Frame> &cameraPoses, vector<Eigen::Vector3d> &points) {
int featureNums = 20; // number of features; every frame is assumed to observe all of them
int poseNums = 3; // number of camera poses
double radius = 8;
for (int n = 0; n < poseNums; ++n) {
double theta = n * 2 * M_PI / (poseNums * 4); // poses span a quarter circle
// rotation about the z axis
Eigen::Matrix3d R;
R = Eigen::AngleAxisd(theta, Eigen::Vector3d::UnitZ());// angle-axis rotation about z
Eigen::Vector3d t = Eigen::Vector3d(radius * cos(theta) - radius, radius * sin(theta), 1 * sin(2 * theta));// translation along the arc
cameraPoses.push_back(Frame(R, t));
}
// random 3D feature points
std::default_random_engine generator;// default seed: deterministic data set
std::normal_distribution<double> noise_pdf(0., 1. / 1000.); // ~2 pixel / focal length observation noise, N(0, 1/1000)
for (int j = 0; j < featureNums; ++j) {
std::uniform_real_distribution<double> xy_rand(-4, 4.0);// x, y drawn from [-4, 4)
std::uniform_real_distribution<double> z_rand(4., 8.);// z drawn from [4, 8)
Eigen::Vector3d Pw(xy_rand(generator), xy_rand(generator), z_rand(generator));
points.push_back(Pw);// store the world-frame 3D point
// observation of this point in every frame
for (int i = 0; i < poseNums; ++i) {
Eigen::Vector3d Pc = cameraPoses[i].Rwc.transpose() * ( Pw - cameraPoses[i].twc);// world -> camera: Pc = Rwc^T * (Pw - twc)
Pc = Pc / Pc.z(); // project onto the normalized image plane
Pc[0] += noise_pdf(generator);// add observation noise to u, v only (z stays 1)
Pc[1] += noise_pdf(generator);
cameraPoses[i].featurePerId.insert(make_pair(j, Pc));// record the noisy observation under feature id j
}
}
}
构建Problem问题:
Problem problem(Problem::ProblemType::SLAM_PROBLEM);// SLAM-type problem: H has pose/landmark structure, so the Schur complement can accelerate the solve
添加顶点Vertex,在这段程序中,待优化的变量是相机的位姿:
// All camera pose vertices.
vector<shared_ptr<VertexPose> > vertexCams_vec;
for (size_t i = 0; i < cameras.size(); ++i) {
shared_ptr<VertexPose> vertexCam(new VertexPose());
Eigen::VectorXd pose(7);
pose << cameras[i].twc, cameras[i].qwc.x(), cameras[i].qwc.y(), cameras[i].qwc.z(), cameras[i].qwc.w();
vertexCam->SetParameters(pose);
// if(i < 2)
// vertexCam->SetFixed();
problem.AddVertex(vertexCam);
vertexCams_vec.push_back(vertexCam);
}
std::default_random_engine generator;
std::normal_distribution<double> noise_pdf(0, 1.);// depth initialization noise, N(0, 1)
double noise = 0;
vector<double> noise_invd;
vector<shared_ptr<VertexInverseDepth> > allPoints;// inverse-depth landmark vertices
for (size_t i = 0; i < points.size(); ++i) {
// Assume every feature is first observed in frame 0, so its inverse depth is easy to compute.
Eigen::Vector3d Pw = points[i];
Eigen::Vector3d Pc = cameras[0].Rwc.transpose() * (Pw - cameras[0].twc);// world -> camera 0: Rwc^T * (Pw - twc)
noise = noise_pdf(generator);
double inverse_depth = 1. / (Pc.z() + noise);// noisy inverse depth used as the initial estimate
// double inverse_depth = 1. / Pc.z();
noise_invd.push_back(inverse_depth);
// Initialize the feature vertex.
shared_ptr<VertexInverseDepth> verterxPoint(new VertexInverseDepth());// landmark vertex
VecX inv_d(1);// the optimized variable is one-dimensional: the inverse depth
inv_d << inverse_depth;
verterxPoint->SetParameters(inv_d);// set the vertex parameter
problem.AddVertex(verterxPoint);// add the inverse-depth vertex to the problem, same as the pose vertices
allPoints.push_back(verterxPoint);
// One reprojection edge per observation; frame 0 is the host frame.
for (size_t j = 1; j < cameras.size(); ++j) {
Eigen::Vector3d pt_i = cameras[0].featurePerId.find(i)->second;// observation in frame 0
Eigen::Vector3d pt_j = cameras[j].featurePerId.find(i)->second;// observation in frame j
shared_ptr<EdgeReprojection> edge(new EdgeReprojection(pt_i, pt_j));// construct the edge
edge->SetTranslationImuFromCamera(qic, tic);// set the camera-to-IMU transform
std::vector<std::shared_ptr<Vertex> > edge_vertex;// vertices this edge connects: landmark, frame-0 pose, frame-j pose
edge_vertex.push_back(verterxPoint);
edge_vertex.push_back(vertexCams_vec[0]);
edge_vertex.push_back(vertexCams_vec[j]);
edge->SetVertex(edge_vertex);// link the vertices to the edge
problem.AddEdge(edge);// register the edge
}
}
——————————————————————————————————————————————————————————————————————————————————————
// Reprojection edge: connects one inverse-depth landmark vertex with its host
// pose (frame i) and an observing pose (frame j). The residual is 2-D (u, v),
// and the three connected vertex types are declared for the Edge base class.
EdgeReprojection(const Vec3 &pts_i, const Vec3 &pts_j)
    : Edge(2, 3, std::vector<std::string>{"VertexInverseDepth", "VertexPose", "VertexPose"}),
      pts_i_(pts_i),
      pts_j_(pts_j) {}
——————————————————————————————————————————————————————————————————————————————————————
// Edge base-class constructor.
// @param residual_dimension  size of the residual vector (2 here: reprojection error in u, v)
// @param num_verticies       number of vertices this edge connects (3 for reprojection)
// @param verticies_types     type names of the connected vertices, used for checking
Edge::Edge(int residual_dimension, int num_verticies,
           const std::vector<std::string> &verticies_types) {
    residual_.resize(residual_dimension, 1);
    // verticies_.resize(num_verticies); // TODO:: possibly problematic — resizing to empty
    // slots would leave null elements at the front if edge->addVertex is called later.
    if (!verticies_types.empty())
        verticies_types_ = verticies_types;
    jacobians_.resize(num_verticies); // one Jacobian per connected vertex
    id_ = global_edge_id++;           // globally unique edge id
    // BUG FIX: in the original, the size note after this statement was raw
    // (un-commented) text and broke compilation. The information matrix is
    // residual_dim x residual_dim (2x2 here), initialized to identity.
    Eigen::MatrixXd information(residual_dimension, residual_dimension);
    information.setIdentity();
    information_ = information;
}
————————————————————————————————————————————————————————————————————————————————————
// Register an edge with the problem. Returns false if an edge with the same id
// was already added; otherwise stores it and records vertex->edge adjacency.
bool Problem::AddEdge(shared_ptr<Edge> edge) {
    // Guard clause: reject duplicate edge ids.
    if (edges_.find(edge->Id()) != edges_.end()) {
        // LOG(WARNING) << "Edge " << edge->Id() << " has been added before!";
        return false;
    }
    edges_.emplace(edge->Id(), edge);
    // Map every connected vertex id back to this edge (used for marginalization lookups).
    for (auto &vertex : edge->Verticies()) {
        vertexToEdge_.emplace(vertex->Id(), edge);
    }
    return true;
}
顶点与边设置完成后,开始用Solve
进行求解
problem.Solve(5);//迭代次数为5
// Solve the problem with Levenberg-Marquardt for at most `iterations` outer iterations.
// Returns false when there is nothing to optimize, true otherwise.
bool Problem::Solve(int iterations) {
if (edges_.size() == 0 || verticies_.size() == 0) {
std::cerr << "\nCannot solve problem without edges or verticies" << std::endl;
return false;
}
TicToc t_solve;
// Count the optimization-variable dimensions to size the H matrix.
SetOrdering();
// Traverse the edges and build the H matrix.
MakeHessian();
// LM initialization (lambda, initial chi2, stop threshold).
ComputeLambdaInitLM();
// LM iteration loop.
bool stop = false;
int iter = 0;
while (!stop && (iter < iterations)) {
std::cout << "iter: " << iter << " , chi= " << currentChi_ << " , Lambda= " << currentLambda_ << std::endl;
bool oneStepSuccess = false;
int false_cnt = 0;
while (!oneStepSuccess) // keep retrying lambda until one step succeeds
{
// setLambda
// AddLambdatoHessianLM();
// Step 4: solve the (damped) linear system.
SolveLinearSystem();
//
// RemoveLambdaHessianLM();
// Exit condition 1: stop when delta_x_ is tiny, or after too many failed retries.
if (delta_x_.squaredNorm() <= 1e-6 || false_cnt > 10) {
stop = true;
break;
}
// Apply the increment to the state.
UpdateStates();
// Check whether the step reduced the cost, and update lambda accordingly.
oneStepSuccess = IsGoodStepInLM();
// Post-processing.
if (oneStepSuccess) {
// Re-linearize: rebuild the Hessian at the new linearization point.
MakeHessian();
// TODO:: this check can be dropped — b_max <= 1e-12 is almost never reached,
// and the threshold should be relative rather than absolute anyway.
// double b_max = 0.0;
// for (int i = 0; i < b_.size(); ++i) {
// b_max = max(fabs(b_(i)), b_max);
// }
// // Exit condition 2: stop once the residual b_max is tiny.
// stop = (b_max <= 1e-12);
false_cnt = 0;
} else {
false_cnt ++;
RollbackStates(); // cost did not decrease: roll the update back
}
}
iter++;
// Exit condition 3: stop once currentChi_ has dropped by 1e6x from the initial chi2.
// NOTE(review): stopThresholdLM_ was set to 1e-6 * initial chi2 but is compared
// against sqrt(currentChi_) here — the units are inconsistent; confirm intent.
if (sqrt(currentChi_) <= stopThresholdLM_)
stop = true;
}
std::cout << "problem solve cost: " << t_solve.toc() << " ms" << std::endl;
std::cout << " makeHessian cost: " << t_hessian_cost_ << " ms" << std::endl;
return true;
}
——————————————————————————————————————————————————————————————————————————————————————
// Count the dimensions of all optimization variables and assign each vertex its
// ordering (column offset) in the Hessian, preparing for MakeHessian().
void Problem::SetOrdering() {
    // Recount from scratch on every call.
    ordering_poses_ = 0;
    ordering_generic_ = 0;
    ordering_landmarks_ = 0;
    int debug = 0; // debug-only tally of pose dimensions (never read back)
    // Note:: verticies_ is a map, so iteration is ordered by vertex id.
    // Iterate by const reference — the original copied the map pair (and its
    // shared_ptr, with atomic refcount traffic) on every vertex.
    for (const auto &vertex : verticies_) {
        ordering_generic_ += vertex.second->LocalDimension(); // total dimension of all variables
        if (IsPoseVertex(vertex.second)) {
            debug += vertex.second->LocalDimension();
        }
        if (problemType_ == ProblemType::SLAM_PROBLEM) // SLAM problems count poses and landmarks separately so they can be grouped
        {
            AddOrderingSLAM(vertex.second);
        }
        if (IsPoseVertex(vertex.second)) {
            std::cout << vertex.second->Id() << " order: " << vertex.second->OrderingId() << std::endl;
        }
    }
    std::cout << "\n ordered_landmark_vertices_ size : " << idx_landmark_vertices_.size() << std::endl;
    if (problemType_ == ProblemType::SLAM_PROBLEM) {
        // Shift landmark orderings by the total pose dimension, so poses come
        // first and landmarks last in the Hessian.
        ulong all_pose_dimension = ordering_poses_;
        for (const auto &landmarkVertex : idx_landmark_vertices_) {
            landmarkVertex.second->SetOrderingId(
                    landmarkVertex.second->OrderingId() + all_pose_dimension
            );
        }
    }
    // CHECK_EQ(CheckOrdering(), true);
}
×××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××
// Assign a SLAM-specific ordering id to one vertex: pose vertices are numbered
// in one running sequence, landmark vertices in another, and each kind is also
// indexed by vertex id for later lookup.
void Problem::AddOrderingSLAM(std::shared_ptr<myslam::backend::Vertex> v) {
    if (IsPoseVertex(v)) {
        // Pose: take the next free offset in the pose block, then advance it.
        v->SetOrderingId(ordering_poses_);
        idx_pose_vertices_.emplace(v->Id(), v);
        ordering_poses_ += v->LocalDimension();
    } else if (IsLandmarkVertex(v)) {
        // Landmark: same scheme in the landmark block (shifted behind the poses
        // afterwards by SetOrdering).
        v->SetOrderingId(ordering_landmarks_);
        ordering_landmarks_ += v->LocalDimension();
        idx_landmark_vertices_.emplace(v->Id(), v);
    }
}
×××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××
//统计完优化变量维度后,开始准备构建H阵
// Build the full (dense) normal-equation system: H = sum J^T W J and
// b = -sum J^T W r, accumulating one block per (vertex_i, vertex_j) pair of
// every edge.
void Problem::MakeHessian() {
    TicToc t_h;
    // Allocate the full system; size is the total dimension of all variables.
    ulong size = ordering_generic_;
    MatXX H(MatXX::Zero(size, size));
    VecX b(VecX::Zero(size));
    for (auto &edge : edges_) {
        edge.second->ComputeResidual();
        edge.second->ComputeJacobians();
        // Bind by const reference — the original copied these containers (and each
        // Jacobian matrix below) wholesale on every edge.
        const auto &jacobians = edge.second->Jacobians();
        const auto &verticies = edge.second->Verticies();
        assert(jacobians.size() == verticies.size());
        for (size_t i = 0; i < verticies.size(); ++i) {
            const auto &v_i = verticies[i]; // i=0 inverse depth, i=1 host frame, i=2 observing frame
            if (v_i->IsFixed()) continue;   // a fixed vertex contributes nothing (Jacobian treated as 0)
            const auto &jacobian_i = jacobians[i];
            ulong index_i = v_i->OrderingId();
            ulong dim_i = v_i->LocalDimension();
            MatXX JtW = jacobian_i.transpose() * edge.second->Information(); // J_i^T * W
            for (size_t j = i; j < verticies.size(); ++j) {
                const auto &v_j = verticies[j];
                if (v_j->IsFixed()) continue;
                const auto &jacobian_j = jacobians[j];
                ulong index_j = v_j->OrderingId();
                ulong dim_j = v_j->LocalDimension();
                assert(v_j->OrderingId() != -1);
                MatXX hessian = JtW * jacobian_j; // block J_i^T W J_j
                // Accumulate into the upper triangle ...
                H.block(index_i, index_j, dim_i, dim_j).noalias() += hessian;
                if (j != i) {
                    // ... and mirror the block into the symmetric lower triangle.
                    H.block(index_j, index_i, dim_j, dim_i).noalias() += hessian.transpose();
                }
            }
            b.segment(index_i, dim_i).noalias() -= JtW * edge.second->Residual(); // -J^T W r
        }
    }
    Hessian_ = H;
    b_ = b;
    t_hessian_cost_ += t_h.toc();
    // Eigen::JacobiSVD<Eigen::MatrixXd> svd(H, Eigen::ComputeThinU | Eigen::ComputeThinV);
    // std::cout << svd.singularValues() <<std::endl;
    if (err_prior_.rows() > 0) {
        b_prior_ -= H_prior_ * delta_x_.head(ordering_poses_); // update the error_prior
    }
    // Fold the (pose-only) marginalization prior into the system.
    Hessian_.topLeftCorner(ordering_poses_, ordering_poses_) += H_prior_;
    b_.head(ordering_poses_) += b_prior_;
    delta_x_ = VecX::Zero(size); // initial delta_x = 0_n
}
——————————————————————————————————————————————————————————————————————————————————————
对于残差的计算,edge_reprojection.cpp 对 ComputeResidual 进行了重写:
void EdgeReprojection::ComputeResidual() {
//std::cout<<"PPPPPPPPPP"<<std::endl;
double inv_dep_i = verticies_[0]->Parameters()[0];//逆深度
//std::cout<<"inv="<<inv_dep_i<<std::endl;
VecX param_i = verticies_[1]->Parameters();
Qd Qi(param_i[6], param_i[3], param_i[4], param_i[5]);//第零帧四元数
Vec3 Pi = param_i.head<3>();//第零帧t
VecX param_j = verticies_[2]->Parameters();//第j帧四元数
Qd Qj(param_j[6], param_j[3], param_j[4], param_j[5]);
Vec3 Pj = param_j.head<3>();//第j帧t
Vec3 pts_camera_i = pts_i_ / inv_dep_i;//第零帧观测反投影到三维空间中
Vec3 pts_imu_i = qic * pts_camera_i + tic;//将这个点转到imu坐标系Ric*Pc+tic
Vec3 pts_w = Qi * pts_imu_i + Pi;//转到世界坐标系,
Vec3 pts_imu_j = Qj.inverse() * (pts_w - Pj);//转到imu坐标系
Vec3 pts_camera_j = qic.inverse() * (pts_imu_j - tic);//转到第j帧
double dep_j = pts_camera_j.z();
residual_ = (pts_camera_j / dep_j).head<2>() - pts_j_.head<2>(); /// J^t * J * delta_x = - J^t * r
// residual_ = information_ * residual_; // remove information here, we multi information matrix in problem solver
}
对于 Hessian 矩阵的构建,主要采用拼接的方式:分别计算出每条边的 hessian 块,放入与 Hessian 阵维度相同的矩阵中,再将各个 hessian 块相加即可得到最终的 Hessian 阵。注意这里循环的边共有 40 条,其存储格式如下:
边ID | 边的内容verticies_ |
---|---|
0 | [0逆,0帧,1帧] |
1 | [0逆,0帧,2帧] |
2 | [1逆,0帧,1帧] |
3 | [1逆,0帧,2帧] |
… | … |
39 | [19逆,0帧,2帧] |
至此大的H阵构建完毕。
接下来就是使用LM算法对pose和三维点进行优化
//对LM进行初始化,也就是初始化Lambda
void Problem::ComputeLambdaInitLM() {
ni_ = 2.;
currentLambda_ = -1.;
currentChi_ = 0.0;
// TODO:: robust cost chi2
for (auto edge: edges_) {
currentChi_ += edge.second->Chi2();//这个就是f(x)^T*协防差的逆*f(x)
}
if (err_prior_.rows() > 0) // marg prior residual
currentChi_ += err_prior_.norm();
stopThresholdLM_ = 1e-6 * currentChi_; // 迭代条件为 误差下降 1e-6 倍
double maxDiagonal = 0;
ulong size = Hessian_.cols();//38维
assert(Hessian_.rows() == Hessian_.cols() && "Hessian is not square");//判断H阵是否为方阵
for (ulong i = 0; i < size; ++i) {
maxDiagonal = std::max(fabs(Hessian_(i, i));// maxDiagonal);将lambda设置成和J^TJ对角线上最大的元素
}
double tau = 1e-5;
currentLambda_ = tau * maxDiagonal;//这个就是初始的lambda的值
}
设置完成后,下面开始进行LM迭代优化
// LM iteration loop (excerpt of the Problem::Solve body shown above).
bool stop = false;
int iter = 0;
while (!stop && (iter < iterations)) {// stop on the iteration cap or a convergence condition
std::cout << "iter: " << iter << " , chi= " << currentChi_ << " , Lambda= " << currentLambda_ << std::endl;
bool oneStepSuccess = false;
int false_cnt = 0;
while (!oneStepSuccess) // keep retrying lambda until one step succeeds
{
// setLambda
// AddLambdatoHessianLM();
// Step 4: solve the (damped) linear system.
SolveLinearSystem();
//
// RemoveLambdaHessianLM();
// Exit condition 1: stop when delta_x_ is tiny, or after too many failed retries.
if (delta_x_.squaredNorm() <= 1e-6 || false_cnt > 10) {// convergence / give-up check
stop = true;
break;
}
// Apply the increment to the state.
UpdateStates();
// Check whether the step reduced the cost, and update lambda accordingly.
oneStepSuccess = IsGoodStepInLM();
// Post-processing.
if (oneStepSuccess) {
// Re-linearize: rebuild the Hessian at the new linearization point.
MakeHessian();
// TODO:: this check can be dropped — b_max <= 1e-12 is almost never reached,
// and the threshold should be relative rather than absolute anyway.
// double b_max = 0.0;
// for (int i = 0; i < b_.size(); ++i) {
// b_max = max(fabs(b_(i)), b_max);
// }
// // Exit condition 2: stop once the residual b_max is tiny.
// stop = (b_max <= 1e-12);
false_cnt = 0;
} else {
false_cnt ++;
RollbackStates(); // cost did not decrease: roll the update back
}
}
iter++;
// Exit condition 3: stop once currentChi_ has dropped by 1e6x from the initial chi2.
if (sqrt(currentChi_) <= stopThresholdLM_)
stop = true;
}
std::cout << "problem solve cost: " << t_solve.toc() << " ms" << std::endl;
std::cout << " makeHessian cost: " << t_hessian_cost_ << " ms" << std::endl;
return true;
}
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//开始求解线性方程
// Solve the damped normal equations H * delta_x = b for the current lambda.
// Generic problems are solved directly; SLAM problems first marginalize the
// landmark block with the Schur complement (Hmm is block-diagonal, so its
// inverse is cheap).
void Problem::SolveLinearSystem() {
    if (problemType_ == ProblemType::GENERIC_PROBLEM) {
        // Non-SLAM problem: dense solve of the damped system.
        MatXX H = Hessian_;
        for (ulong i = 0; i < Hessian_.cols(); ++i) {
            H(i, i) += currentLambda_; // LM damping on the diagonal
        }
        // delta_x_ = PCGSolver(H, b_, H.rows() * 2);
        // BUG FIX: the original solved with the undamped 'Hessian_', silently
        // discarding the lambda that was just added to H above.
        delta_x_ = H.inverse() * b_;
    } else {
        // SLAM problem: Schur complement to marginalize the landmarks.
        // step1: partition H = [Hpp Hpm; Hmp Hmm] and b = [bpp; bmm].
        int reserve_size = ordering_poses_;  // pose block dimension
        int marg_size = ordering_landmarks_; // landmark block dimension
        MatXX Hmm = Hessian_.block(reserve_size, reserve_size, marg_size, marg_size); // landmark block (block-diagonal)
        MatXX Hmp = Hessian_.block(reserve_size, 0, marg_size, reserve_size);
        MatXX Hpm = Hessian_.block(0, reserve_size, reserve_size, marg_size);
        VecX bpp = b_.segment(0, reserve_size);
        VecX bmm = b_.segment(reserve_size, marg_size);
        // Hmm is block-diagonal, so inverting it reduces to inverting each block;
        // for 1-D inverse-depth landmarks this is just a reciprocal.
        MatXX Hmm_inv(MatXX::Zero(marg_size, marg_size));
        for (const auto &landmarkVertex : idx_landmark_vertices_) {
            int idx = landmarkVertex.second->OrderingId() - reserve_size; // offset inside the landmark block
            int size = landmarkVertex.second->LocalDimension();
            Hmm_inv.block(idx, idx, size, size) = Hmm.block(idx, idx, size, size).inverse();
        }
        // Schur complement: Hpp_schur = Hpp - Hpm Hmm^-1 Hmp, bpp_schur = bpp - Hpm Hmm^-1 bmm.
        MatXX tempH = Hpm * Hmm_inv;
        H_pp_schur_ = Hessian_.block(0, 0, reserve_size, reserve_size) - tempH * Hmp;
        b_pp_schur_ = bpp - tempH * bmm;
        // step2: solve the reduced pose system Hpp_schur * delta_x_pp = bpp_schur.
        VecX delta_x_pp(VecX::Zero(reserve_size));
        for (ulong i = 0; i < ordering_poses_; ++i) {
            H_pp_schur_(i, i) += currentLambda_; // LM damping keeps the reduced system invertible
        }
        int n = H_pp_schur_.rows() * 2; // PCG iteration cap
        delta_x_pp = PCGSolver(H_pp_schur_, b_pp_schur_, n); // overkill for a problem this small, but harmless
        delta_x_.head(reserve_size) = delta_x_pp;
        // step3: back-substitute the landmarks: delta_x_ll = Hmm^-1 * (bmm - Hmp * delta_x_pp).
        VecX delta_x_ll(marg_size);
        delta_x_ll = Hmm_inv * (bmm - Hmp * delta_x_pp);
        delta_x_.tail(marg_size) = delta_x_ll;
    }
}
——————————————————————————————————————————————————————————————————————————————————————————————
// Apply the computed increment delta_x_ to every vertex, and refresh the
// marginalization prior error term if one is present.
void Problem::UpdateStates() {
    // const ref: the original copied the map pair (and its shared_ptr) per vertex.
    for (const auto &vertex : verticies_) {
        ulong idx = vertex.second->OrderingId();     // column offset of this vertex in delta_x_
        ulong dim = vertex.second->LocalDimension(); // local (tangent-space) dimension
        VecX delta = delta_x_.segment(idx, dim);
        vertex.second->Plus(delta); // vertex-type-specific update with its slice of delta_x_
    }
    if (err_prior_.rows() > 0) {
        b_prior_ -= H_prior_ * delta_x_.head(ordering_poses_); // update the error_prior
        err_prior_ = Jt_prior_inv_ * b_prior_.head(ordering_poses_ - 6);
    }
}
————————————————————————————————————————————————————————————————————————————————————————————
// Decide whether the last LM step reduced the cost and update lambda accordingly
// (Nielsen's damping strategy). Returns true when the step is accepted.
bool Problem::IsGoodStepInLM() {
    double scale = 0;
    // Predicted reduction of the linearized model: delta_x^T (lambda * delta_x + b).
    scale = delta_x_.transpose() * (currentLambda_ * delta_x_ + b_);
    scale += 1e-3; // make sure it's non-zero (guards the division below)
    // Recompute the actual chi2 after the state update.
    // TODO:: get robustChi2() instead of Chi2()
    double tempChi = 0.0;
    // const ref: the original copied the map pair (and its shared_ptr) per edge.
    for (const auto &edge : edges_) {
        edge.second->ComputeResidual();
        tempChi += edge.second->Chi2(); // r^T * W * r
    }
    if (err_prior_.size() > 0)
        tempChi += err_prior_.norm();
    // Gain ratio: actual cost reduction / predicted reduction.
    double rho = (currentChi_ - tempChi) / scale;
    if (rho > 0 && isfinite(tempChi)) // the step reduced the cost: accept it
    {
        double alpha = 1. - pow((2 * rho - 1), 3);
        alpha = std::min(alpha, 2. / 3.);
        double scaleFactor = (std::max)(1. / 3., alpha);
        currentLambda_ *= scaleFactor; // shrink lambda: behave more like Gauss-Newton
        ni_ = 2;
        currentChi_ = tempChi;
        return true;
    } else {
        // Cost did not decrease: grow lambda (more gradient-descent-like) and retry.
        currentLambda_ *= ni_;
        ni_ *= 2;
        return false;
    }
}