LiLi-OM Source Code Reading - ROT (Part 4): MarginalizationFactor.cpp

These are source-code reading notes written to the best of my ability; if I have misunderstood anything, please point it out. Thank you.

#include "factors/MarginalizationFactor.h"

void *ThreadsConstructA(void *threadsstruct) {
    ThreadsStruct *p = ((ThreadsStruct *) threadsstruct);
    //iterate over all factors (observation terms) assigned to this thread
    for (auto it : p->sub_factors) {
        //iterate over every parameter block of this factor and accumulate its contribution
        for (int i = 0; i < static_cast<int>(it->parameter_blocks.size()); i++) {
            //look up this block's position in A and its global size
            int idx_i = p->parameter_block_idx[reinterpret_cast<long>(it->parameter_blocks[i])];
            int size_i = p->parameter_block_size[reinterpret_cast<long>(it->parameter_blocks[i])];
            if (size_i == 4)//a quaternion block has 4 parameters but only 3 DoF, so its local size is 3
                size_i = 3;
            //use only the local-size part of the Jacobian;
            //rightCols(cols) takes the last cols columns, i.e. the tangent-space columns for a quaternion block
            Eigen::MatrixXd jacobian_i = it->jacobians[i].rightCols(size_i);
            for (int j = i; j < static_cast<int>(it->parameter_blocks.size()); j++) {
                int idx_j = p->parameter_block_idx[reinterpret_cast<long>(it->parameter_blocks[j])];
                int size_j = p->parameter_block_size[reinterpret_cast<long>(it->parameter_blocks[j])];
                if (size_j == 4)
                    size_j = 3;
                Eigen::MatrixXd jacobian_j = it->jacobians[j].rightCols(size_j);
                //diagonal block: A is the Hessian approximation H in H * dx = b
                if (i == j)
                    p->A.block(idx_i, idx_j, size_i, size_j) += jacobian_i.transpose() * jacobian_j;
                else {
                //off-diagonal blocks: fill (idx_i, idx_j) and mirror it to (idx_j, idx_i)
                    p->A.block(idx_i, idx_j, size_i, size_j) += jacobian_i.transpose() * jacobian_j;
                    p->A.block(idx_j, idx_i, size_j, size_i) = p->A.block(idx_i, idx_j, size_i, size_j).transpose();
                }
            }
            //accumulate the right-hand side of H * dx = b:  b += J^T * r
            p->b.segment(idx_i, size_i) += jacobian_i.transpose() * it->residuals;
        }
    }
    return threadsstruct;
}
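
In matrix form, each thread accumulates its share of the Gauss-Newton normal equations. With $r_k$ and $J_k$ the residual and Jacobian of factor $k$, and $J_{k,i}$ the columns belonging to parameter block $i$, the loop above computes

$$A_{ij} \leftarrow A_{ij} + J_{k,i}^T J_{k,j}, \qquad b_i \leftarrow b_i + J_{k,i}^T r_k,$$

so after the per-thread results are summed in Marginalize(), $A = \sum_k J_k^T J_k$ and $b = \sum_k J_k^T r_k$, i.e. the linear system $A\,\delta x = b$ that the marginalization operates on.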

void ResidualBlockInfo::Evaluate() {
    //number of residuals of this factor (e.g. an IMU preintegration or lidar feature residual)
    residuals.resize(cost_function->num_residuals());
    //global sizes of the optimization-variable blocks this factor depends on
    std::vector<int> block_sizes = cost_function->parameter_block_sizes();
    //outer dimension of the raw Jacobian array: one entry per parameter block
    raw_jacobians = new double *[block_sizes.size()];
    jacobians.resize(block_sizes.size());
    //each Jacobian block is num_residuals rows by block_sizes[i] columns;
    //e.g. a 2-dim residual and a 5-dim parameter block give a 2x5 block
    for (int i = 0; i < static_cast<int>(block_sizes.size()); i++) {
        jacobians[i].resize(cost_function->num_residuals(), block_sizes[i]);
        raw_jacobians[i] = jacobians[i].data();
    }
    //call each factor's own Evaluate() (polymorphism) to compute its residuals and Jacobians
    cost_function->Evaluate(parameter_blocks.data(), residuals.data(), raw_jacobians);
    //if a robust loss function is attached, rescale the residuals and Jacobians
    if (loss_function) {
        double residual_scaling_, alpha_sq_norm_;

        double sq_norm, rho[3];

        sq_norm = residuals.squaredNorm();
        loss_function->Evaluate(sq_norm, rho);

        double sqrt_rho1_ = sqrt(rho[1]);

        if ((sq_norm == 0.0) || (rho[2] <= 0.0)) {
            residual_scaling_ = sqrt_rho1_;
            alpha_sq_norm_ = 0.0;
        } else {
            const double D = 1.0 + 2.0 * sq_norm * rho[2] / rho[1];
            const double alpha = 1.0 - sqrt(D);
            residual_scaling_ = sqrt_rho1_ / (1 - alpha);
            alpha_sq_norm_ = alpha / sq_norm;
        }
        //apply the robust-loss correction to each Jacobian block (see the formulas after this function)
        for (int i = 0; i < static_cast<int>(parameter_blocks.size()); i++) {
            jacobians[i] = sqrt_rho1_ * (jacobians[i] - alpha_sq_norm_ * residuals * (residuals.transpose() * jacobians[i]));

        }

        residuals *= residual_scaling_;
    }
}
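
The rescaling above matches the corrector Ceres applies internally for robust loss functions (after Triggs et al.). With $s = \|r\|^2$ and $\rho'(s)$, $\rho''(s)$ returned by loss_function->Evaluate, the corrected residual and Jacobian are

$$\tilde{r} = \frac{\sqrt{\rho'}}{1-\alpha}\, r, \qquad \tilde{J} = \sqrt{\rho'}\left(J - \frac{\alpha}{s}\, r\, r^T J\right), \qquad \alpha = 1 - \sqrt{1 + 2 s \rho''/\rho'},$$

with the fallback $\alpha = 0$ (pure $\sqrt{\rho'}$ scaling) when $s = 0$ or $\rho'' \le 0$. One can check that $\tilde{J}^T \tilde{r} = \rho' J^T r$, i.e. the corrected quantities reproduce the exact gradient of the robustified cost $\frac{1}{2}\rho(\|r\|^2)$ while keeping the Gauss-Newton Hessian positive semidefinite.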

MarginalizationInfo::~MarginalizationInfo() {
    for (auto it = parameter_block_data.begin(); it != parameter_block_data.end(); ++it)
        delete it->second;

    for (int i = 0; i < (int) factors.size(); i++) {

        delete[] factors[i]->raw_jacobians;

        delete factors[i]->cost_function;

        delete factors[i];
    }
}

//This registers each cost function's parameter blocks in parameter_block_size and the blocks to be marginalized in parameter_block_idx. Note that after this step parameter_block_idx only holds the addresses of the to-be-marginalized blocks as keys, with every value still 0; the actual positions are assigned later in Marginalize().
void MarginalizationInfo::AddResidualBlockInfo(ResidualBlockInfo *residual_block_info) {
    factors.emplace_back(residual_block_info);

    std::vector<double *> &parameter_blocks = residual_block_info->parameter_blocks;//all variable blocks this factor touches
    std::vector<int> parameter_block_sizes = residual_block_info->cost_function->parameter_block_sizes();

    for (int i = 0; i < static_cast<int>(residual_block_info->parameter_blocks.size()); i++) //record the size of every optimization-variable block
    {
        double *addr = parameter_blocks[i];//pointer to the block's data
        int size = parameter_block_sizes[i];//the address alone is not enough; we also need the length of the data it points to
        parameter_block_size[reinterpret_cast<long>(addr)] = size;//the pointer value (cast to long) serves as the map key
    }

    if(residual_block_info->drop_set.size() == 0) return;

    for (int i = 0; i < static_cast<int>(residual_block_info->drop_set.size()); i++) //record the blocks to be marginalized
    {
        double *addr = parameter_blocks[residual_block_info->drop_set[i]];//drop_set[i] indexes a to-be-marginalized block within this factor
        parameter_block_idx[reinterpret_cast<long>(addr)] = 0;//insert its address as a key; the position value is filled in later
    }
}
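
For intuition, a hypothetical call site could look like the sketch below, assuming the VINS-Mono-style ResidualBlockInfo constructor; the identifiers imu_cost_function, para_pose0 and so on are illustrative assumptions, not LiLi-OM's actual names. drop_set lists the positions, within this factor's parameter list, of the blocks that will be marginalized:

// sketch only: an IMU factor connecting the oldest state to its successor,
// where blocks 0 and 1 (the oldest pose and speed/bias) are to be dropped
MarginalizationInfo *marg_info = new MarginalizationInfo();
ResidualBlockInfo *imu_block = new ResidualBlockInfo(
        imu_cost_function, nullptr /* no robust loss */,
        std::vector<double *>{para_pose0, para_speed_bias0, para_pose1, para_speed_bias1},
        std::vector<int>{0, 1} /* drop_set: indices into the block list above */);
marg_info->AddResidualBlockInfo(imu_block);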

//preprocessing: evaluate every factor and snapshot the current parameter values
void MarginalizationInfo::PreMarginalize() {
    for (auto it : factors)//each residual block previously registered via AddResidualBlockInfo
    {
        it->Evaluate();//compute this factor's residuals and Jacobians (polymorphic call)

        std::vector<int> block_sizes = it->cost_function->parameter_block_sizes();
        for (int i = 0; i < static_cast<int>(block_sizes.size()); i++) {
            long addr = reinterpret_cast<long>(it->parameter_blocks[i]);//address of the variable block
            int size = block_sizes[i];
            if (parameter_block_data.find(addr) == parameter_block_data.end()) //parameter_block_data stores a copy of every involved block
            {
                double *data = new double[size];
                memcpy(data, it->parameter_blocks[i], sizeof(double) * size);//deep-copy into freshly allocated memory
                parameter_block_data[addr] = data;//key the snapshot by the original block's address; it becomes the linearization point
            }
        }
    }
}

//local (tangent-space) size of a block: a quaternion is stored with 4 parameters but has only 3 DoF
int MarginalizationInfo::LocalSize(int size) const {
    return size == 4 ? 3 : size;
}

void MarginalizationInfo::Marginalize() {
    int pos = 0;
    for (auto &it : parameter_block_idx) //assign positions to the to-be-marginalized blocks first
    {
        it.second = pos;
        pos += LocalSize(parameter_block_size[it.first]);
    }

    m = pos;//total local dimension of the blocks to be marginalized

    for (const auto &it : parameter_block_size)
    {
        if (parameter_block_idx.find(it.first) == parameter_block_idx.end()) //if this block is NOT one of the to-be-marginalized ones
        {
            parameter_block_idx[it.first] = pos;//assign the kept block the next position
            pos += LocalSize(it.second);//advance pos by this block's local size
        }
    }

    n = pos - m;//total local dimension of the blocks that are kept

    //this effectively reorders all blocks: the to-be-marginalized ones occupy indices [0, m) and the kept ones [m, m + n)

    Eigen::MatrixXd A(pos, pos);//full information matrix over all blocks
    Eigen::VectorXd b(pos);
    A.setZero();
    b.setZero();

    pthread_t tids[NUM_THREADS];
    ThreadsStruct threadsstruct[NUM_THREADS];
    int i = 0;
    for (auto it : factors) //distribute the factors over the threads round-robin
    {
        threadsstruct[i].sub_factors.push_back(it);
        i++;
        i = i % NUM_THREADS;
    }
    for (int i = 0; i < NUM_THREADS; i++) {
        threadsstruct[i].A = Eigen::MatrixXd::Zero(pos, pos);
        threadsstruct[i].b = Eigen::VectorXd::Zero(pos);
        threadsstruct[i].parameter_block_size = parameter_block_size;
        threadsstruct[i].parameter_block_idx = parameter_block_idx;
        int ret = pthread_create(&tids[i], NULL, ThreadsConstructA, (void *) &(threadsstruct[i]));//each thread builds its own partial A and b
        if (ret != 0) {
            ROS_DEBUG("pthread_create error");
            ROS_BREAK();
        }
    }
    for (int i = NUM_THREADS - 1; i >= 0; i--) {//join the threads and sum their partial results
        pthread_join(tids[i], NULL);
        A += threadsstruct[i].A;
        b += threadsstruct[i].b;
    }

    //symmetrize Amm before the eigen-decomposition to guard against numerical asymmetry
    Eigen::MatrixXd Amm = 0.5 * (A.block(0, 0, m, m) + A.block(0, 0, m, m).transpose());
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> saes(Amm);

    //pseudo-inverse of Amm: invert only the eigenvalues larger than eps
    Eigen::MatrixXd Amm_inv = saes.eigenvectors()
            * Eigen::VectorXd((saes.eigenvalues().array() > eps).select(saes.eigenvalues().array().inverse(), 0)).asDiagonal()
            * saes.eigenvectors().transpose();

    //Schur complement: eliminate the first m dimensions (see the formulas after this function)
    Eigen::VectorXd bmm = b.segment(0, m);
    Eigen::MatrixXd Amr = A.block(0, m, m, n);
    Eigen::MatrixXd Arm = A.block(m, 0, n, m);
    Eigen::MatrixXd Arr = A.block(m, m, n, n);
    Eigen::VectorXd brr = b.segment(m, n);
    A = Arr - Arm * Amm_inv * Amr;
    b = brr - Arm * Amm_inv * bmm;//A and b are now the marginalized system; their size has shrunk from pos to n

    //recover a virtual prior factor (Jacobian and residual) from the marginalized A and b
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> saes2(A);//eigen-decomposition of the reduced information matrix
    Eigen::VectorXd S = Eigen::VectorXd((saes2.eigenvalues().array() > eps).select(saes2.eigenvalues().array(), 0));
    Eigen::VectorXd S_inv = Eigen::VectorXd((saes2.eigenvalues().array() > eps).select(saes2.eigenvalues().array().inverse(), 0));

    Eigen::VectorXd S_sqrt = S.cwiseSqrt();
    Eigen::VectorXd S_inv_sqrt = S_inv.cwiseSqrt();

    linearized_jacobians = S_sqrt.asDiagonal() * saes2.eigenvectors().transpose();
    linearized_residuals = S_inv_sqrt.asDiagonal() * saes2.eigenvectors().transpose() * b;
}
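
Block-partitioning the full system with the to-be-marginalized variables $x_m$ first and the kept variables $x_r$ second, the code above is exactly the Schur complement:

$$\begin{bmatrix} A_{mm} & A_{mr} \\ A_{rm} & A_{rr} \end{bmatrix}\begin{bmatrix} \delta x_m \\ \delta x_r \end{bmatrix} = \begin{bmatrix} b_m \\ b_r \end{bmatrix} \;\Rightarrow\; A^{*} = A_{rr} - A_{rm} A_{mm}^{-1} A_{mr}, \quad b^{*} = b_r - A_{rm} A_{mm}^{-1} b_m.$$

The final eigen-decomposition $A^{*} = V S V^T$ then factors the reduced system back into a virtual prior factor: $J_0 = S^{1/2} V^T$ and $r_0 = S^{-1/2} V^T b^{*}$ satisfy $J_0^T J_0 = A^{*}$ and $J_0^T r_0 = b^{*}$, so the prior can be handed back to Ceres as an ordinary residual block (linearized_jacobians and linearized_residuals).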

std::vector<double *> MarginalizationInfo::GetParameterBlocks(std::unordered_map<long, double *> &addr_shift) {
    std::vector<double *> keep_block_addr;
    keep_block_size.clear();
    keep_block_idx.clear();
    keep_block_data.clear();

    for (const auto &it : parameter_block_idx) {
        if (it.second >= m) {
            keep_block_size.push_back(parameter_block_size[it.first]);
            keep_block_idx.push_back(parameter_block_idx[it.first]);
            keep_block_data.push_back(parameter_block_data[it.first]);
            keep_block_addr.push_back(addr_shift[it.first]);
        }
    }
    sum_block_size = std::accumulate(std::begin(keep_block_size), std::end(keep_block_size), 0);

    return keep_block_addr;
}
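
The addr_shift map tells GetParameterBlocks where each kept block will live after the window slides. A minimal sketch, assuming the oldest state is dropped and every remaining state moves one slot forward (para_pose, para_speed_bias and WINDOW_SIZE are illustrative assumptions, not LiLi-OM's actual names):

// sketch only: state i is stored in slot i-1 after the slide
std::unordered_map<long, double *> addr_shift;
for (int i = 1; i <= WINDOW_SIZE; i++) {
    addr_shift[reinterpret_cast<long>(para_pose[i])] = para_pose[i - 1];
    addr_shift[reinterpret_cast<long>(para_speed_bias[i])] = para_speed_bias[i - 1];
}
// the returned vector holds the post-slide addresses the new prior factor attaches to
std::vector<double *> prior_blocks = marg_info->GetParameterBlocks(addr_shift);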

MarginalizationFactor::MarginalizationFactor(MarginalizationInfo *_marginalization_info)
        : marginalization_info(_marginalization_info) {
    int cnt = 0;
    //register the global size of every kept block with Ceres and set the residual dimension to n
    for (auto it : marginalization_info->keep_block_size) {
        mutable_parameter_block_sizes()->push_back(it);
        cnt += it;
    }
    set_num_residuals(marginalization_info->n);
}

bool MarginalizationFactor::Evaluate(double const *const *parameters, double *residuals, double **jacobians) const {
    int n = marginalization_info->n;
    int m = marginalization_info->m;
    Eigen::VectorXd dx(n);
    //iterate over the parameter blocks kept by the last marginalization: keep_block_size
    for (int i = 0; i < static_cast<int>(marginalization_info->keep_block_size.size()); i++)
    {
        //this block's global size and its index in the reduced system (kept blocks start at offset m)
        int size = marginalization_info->keep_block_size[i];
        int idx = marginalization_info->keep_block_idx[i] - m;
        //x: the block's current value in the ongoing optimization
        Eigen::VectorXd x = Eigen::Map<const Eigen::VectorXd>(parameters[i], size);
        //x0: the value the block had when it was marginalized, i.e. the linearization point
        Eigen::VectorXd x0 = Eigen::Map<const Eigen::VectorXd>(marginalization_info->keep_block_data[i], size);
        if (size != 4)
        {
            dx.segment(idx, size) = x - x0;//Euclidean blocks: plain subtraction
        }
        else //quaternion blocks cannot be subtracted directly; use the quaternion error instead
        {
            //dx = 2 * vec(q0^{-1} * q); flip the sign if the error quaternion's w is negative
            dx.segment<3>(idx) = 2.0 * (Eigen::Quaterniond(x0(0), x0(1), x0(2), x0(3)).inverse()
                                        * Eigen::Quaterniond(x(0), x(1), x(2), x(3))).normalized().vec();
            if ((Eigen::Quaterniond(x0(0), x0(1), x0(2), x0(3)).inverse() * Eigen::Quaterniond(x(0), x(1), x(2), x(3))).w()
                    < 0) {
                dx.segment<3>(idx) = -2.0 * (Eigen::Quaterniond(x0(0), x0(1), x0(2), x0(3)).inverse()
                                             * Eigen::Quaterniond(x(0), x(1), x(2), x(3))).normalized().vec();
            }
        }
    }
    //first-order expansion of the prior: r = r0 + J0 * dx (see the formulas after this function)
    Eigen::Map<Eigen::VectorXd>(residuals, n) =
            marginalization_info->linearized_residuals + marginalization_info->linearized_jacobians * dx;
    //for the prior factor the Jacobian is held fixed at the linearization point (first-estimates style)
    if (jacobians) {

        for (int i = 0; i < static_cast<int>(marginalization_info->keep_block_size.size()); i++) {
            if (jacobians[i]) {

                int size = marginalization_info->keep_block_size[i], local_size = marginalization_info->LocalSize(size);
                int idx = marginalization_info->keep_block_idx[i] - m;
                //current value and linearization point of this block
                Eigen::VectorXd x = Eigen::Map<const Eigen::VectorXd>(parameters[i], size);
                Eigen::VectorXd x0 = Eigen::Map<const Eigen::VectorXd>(marginalization_info->keep_block_data[i], size);
                //map the raw output array as a row-major (n x size) Jacobian block
                Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
                        jacobian(jacobians[i], n, size);
                jacobian.setZero();
                if (size != 4)
                    jacobian.rightCols(local_size) = marginalization_info->linearized_jacobians.middleCols(idx, local_size);
                else {
                    //chain rule through the quaternion error dx = 2 * vec(q0^{-1} * q):
                    //d(dx)/dq is the bottom-right 3x4 block of 2 * Qleft(q0^{-1}), with the sign matching the hemisphere flip above
                    if ((Eigen::Quaterniond(x0(0), x0(1), x0(2), x0(3)).inverse() * Eigen::Quaterniond(x(0), x(1), x(2), x(3))).w() >= 0)
                        jacobian.rightCols(size) = 2.0 * marginalization_info->linearized_jacobians.middleCols(idx, local_size) *
                                Qleft(Eigen::Quaterniond(x0(0), x0(1), x0(2), x0(3)).inverse()).bottomRightCorner<3, 4>();
                    else
                        jacobian.rightCols(size) = -2.0 * marginalization_info->linearized_jacobians.middleCols(idx, local_size) *
                                Qleft(Eigen::Quaterniond(x0(0), x0(1), x0(2), x0(3)).inverse()).bottomRightCorner<3, 4>();
                }
            }
        }
    }
    return true;
}
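
In summary, the prior factor evaluates the first-order expansion of the marginalized cost around the linearization point $x_0$:

$$r_p(x) = r_0 + J_0\,(x \boxminus x_0),$$

where $\boxminus$ is plain subtraction for Euclidean blocks and $2\,\mathrm{vec}(q_0^{-1} \otimes q)$ for quaternion blocks. $J_0$ (linearized_jacobians) is held fixed at the values computed in Marginalize(); only the residual is re-evaluated as the kept states move, which keeps the prior consistent with its linearization point.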

References:

- VINS-Mono 代码解析六、边缘化(1), 贵在坚持,不忘初心的博客 (CSDN)
- VINS-Mono 代码解析六、边缘化(2)理论和代码详解, 贵在坚持,不忘初心的博客 (CSDN)
- VINS-Mono 代码解析六、边缘化(3), 贵在坚持,不忘初心的博客 (CSDN)

Difference from VINS-Mono:

VINS-Mono's sliding-window optimization couples IMU and camera measurements, whereas here the sliding window couples IMU and lidar measurements: edge and planar feature points serve as the observed landmarks in the window, while lidar keyframes and IMU measurements provide the observations. The constraints between the marginalized lidar keyframe / IMU measurements and the states still inside the window are folded into the prior factor described above, and the window optimization then continues with that prior.
