Annotated Code for the Direct Method in SLAM

#include <opencv2/opencv.hpp>
#include "sophus/se3.hpp"
#include <boost/format.hpp>
#include <pangolin/pangolin.h>
#include <mutex>
#include <chrono>
#include <cmath>
using namespace std;
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
// camera intrinsics
double fx = 718.856, fy = 718.856, cx = 607.1928, cy = 185.2157;
// stereo baseline
double baseline = 0.573;
// paths
string left_file = "./left.png";
string disparity_file = "./disparity.png";
boost::format fmt_others("./%06d.png"); // other files
// useful typedefs
typedef Eigen::Matrix<double, 6, 6> Matrix6d;
typedef Eigen::Matrix<double, 2, 6> Matrix26d;
typedef Eigen::Matrix<double, 6, 1> Vector6d;
/// class for accumulating the jacobians in parallel
class JacobianAccumulator {
public:
    JacobianAccumulator(
        const cv::Mat &img1_,
        const cv::Mat &img2_,
        const VecVector2d &px_ref_,
        const vector<double> depth_ref_,
        Sophus::SE3d &T21_) :
        img1(img1_), img2(img2_), px_ref(px_ref_), depth_ref(depth_ref_), T21(T21_) {
        projection = VecVector2d(px_ref.size(), Eigen::Vector2d(0, 0));
    }

    /// accumulate jacobians over a range of points
    void accumulate_jacobian(const cv::Range &range);

    /// get the (Gauss-Newton approximate) hessian matrix
    Matrix6d hessian() const { return H; }

    /// get bias
    Vector6d bias() const { return b; }

    /// get total cost
    double cost_func() const { return cost; }

    /// get projected points
    VecVector2d projected_points() const { return projection; }

    /// reset H, b and cost to zero
    void reset() {
        H = Matrix6d::Zero();
        b = Vector6d::Zero();
        cost = 0;
    }

private:
    const cv::Mat &img1;
    const cv::Mat &img2;
    const VecVector2d &px_ref;
    const vector<double> depth_ref;
    Sophus::SE3d &T21;
    VecVector2d projection; // projected points

    std::mutex hessian_mutex;
    Matrix6d H = Matrix6d::Zero();
    Vector6d b = Vector6d::Zero();
    double cost = 0;
};
/**
 * pose estimation using the direct method (single pyramid layer)
 * @param img1  reference image
 * @param img2  current image
 * @param px_ref  pixels sampled in the reference image
 * @param depth_ref  depth of the reference pixels
 * @param T21  pose of the current frame w.r.t. the reference frame
 */
void DirectPoseEstimationSingleLayer(
    const cv::Mat &img1,
    const cv::Mat &img2,
    const VecVector2d &px_ref,
    const vector<double> depth_ref,
    Sophus::SE3d &T21);

// multi-layer (pyramid) version, defined after main(); declared here so main() can call it
void DirectPoseEstimationMultiLayer(
    const cv::Mat &img1, const cv::Mat &img2,
    const VecVector2d &px_ref, const vector<double> depth_ref,
    Sophus::SE3d &T21);
// bilinear interpolation of the image intensity at a sub-pixel location (x, y)
inline float GetPixelValue(const cv::Mat &img, float x, float y) {
    // boundary check
    if (x < 0) x = 0;
    if (y < 0) y = 0;
    if (x >= img.cols) x = img.cols - 1;
    if (y >= img.rows) y = img.rows - 1;
    uchar *data = &img.data[int(y) * img.step + int(x)];
    float xx = x - floor(x); // fractional part in x
    float yy = y - floor(y); // fractional part in y
    return float(
        (1 - xx) * (1 - yy) * data[0] +
        xx * (1 - yy) * data[1] +
        (1 - xx) * yy * data[img.step] +
        xx * yy * data[img.step + 1]
    );
}
 		
int main(int argc, char **argv) {

    // read the images
    cv::Mat left_img = cv::imread(left_file, 0);
    cv::Mat disparity_img = cv::imread(disparity_file, 0);

    // randomly pick pixels in the first image and generate some 3D points in the first image's frame
    cv::RNG rng;
    int nPoints = 2000;
    int boarder = 20;
    VecVector2d pixels_ref;
    vector<double> depth_ref;

    // generate pixels in the reference image and load their depth data
    for (int i = 0; i < nPoints; i++) {
        int x = rng.uniform(boarder, left_img.cols - boarder); // don't pick pixels close to the border
        int y = rng.uniform(boarder, left_img.rows - boarder); // don't pick pixels close to the border
        int disparity = disparity_img.at<uchar>(y, x);         // disparity value at this pixel
        double depth = fx * baseline / disparity;              // convert disparity to depth
        depth_ref.push_back(depth);
        pixels_ref.push_back(Eigen::Vector2d(x, y));
    }
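    // Worked example of the disparity-to-depth conversion above (illustrative numbers,
    // not taken from the actual data): with fx = 718.856 and baseline = 0.573 m, a
    // disparity of 50 pixels gives depth = fx * baseline / disparity
    //                                    = 718.856 * 0.573 / 50 ≈ 8.24 m.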
    // estimate the poses of 000001.png ~ 000005.png using this information
    Sophus::SE3d T_cur_ref;

    for (int i = 1; i < 6; i++) {
        cv::Mat img = cv::imread((fmt_others % i).str(), 0);
        // try the single-layer version by uncommenting this line
        // DirectPoseEstimationSingleLayer(left_img, img, pixels_ref, depth_ref, T_cur_ref);
        DirectPoseEstimationMultiLayer(left_img, img, pixels_ref, depth_ref, T_cur_ref);
    }
    return 0;
}
        
void DirectPoseEstimationSingleLayer(
    const cv::Mat &img1,
    const cv::Mat &img2,
    const VecVector2d &px_ref,
    const vector<double> depth_ref,
    Sophus::SE3d &T21) {

    const int iterations = 10;
    double cost = 0, lastCost = 0;
    auto t1 = chrono::steady_clock::now();
    JacobianAccumulator jaco_accu(img1, img2, px_ref, depth_ref, T21);

    for (int iter = 0; iter < iterations; iter++) {
        jaco_accu.reset(); // reset H, b and cost to zero
        // accumulate the jacobians of all points in parallel
        cv::parallel_for_(cv::Range(0, px_ref.size()),
                          std::bind(&JacobianAccumulator::accumulate_jacobian, &jaco_accu, std::placeholders::_1));
        Matrix6d H = jaco_accu.hessian();
        Vector6d b = jaco_accu.bias();

        // solve the update and apply it to the current estimate
        Vector6d update = H.ldlt().solve(b);
        T21 = Sophus::SE3d::exp(update) * T21; // left-multiplicative update of the pose
        cost = jaco_accu.cost_func();          // total photometric cost

        if (std::isnan(update[0])) {
            // sometimes occurs when we have a black or white patch and H is not invertible
            cout << "update is nan" << endl;
            break;
        }
        if (iter > 0 && cost > lastCost) {
            cout << "cost increased: " << cost << ", " << lastCost << endl;
            break;
        }
        if (update.norm() < 1e-3) {
            // converged
            break;
        }

        lastCost = cost;
        cout << "iteration: " << iter << ", cost: " << cost << endl;
    }

    // print the estimated transform T21 and the time used
    cout << "T21 = \n" << T21.matrix() << endl;
    auto t2 = chrono::steady_clock::now();
    auto time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "direct method for single layer: " << time_used.count() << endl;

    // plot the projected pixels
    cv::Mat img2_show;
    cv::cvtColor(img2, img2_show, cv::COLOR_GRAY2BGR);
    VecVector2d projection = jaco_accu.projected_points();
    for (size_t i = 0; i < px_ref.size(); ++i) {
        auto p_ref = px_ref[i];
        auto p_cur = projection[i];
        if (p_cur[0] > 0 && p_cur[1] > 0) {
            cv::circle(img2_show, cv::Point2f(p_cur[0], p_cur[1]), 2, cv::Scalar(0, 250, 0), 2);
            cv::line(img2_show, cv::Point2f(p_ref[0], p_ref[1]), cv::Point2f(p_cur[0], p_cur[1]),
                     cv::Scalar(0, 250, 0));
        }
    }
    cv::imshow("current", img2_show);
    cv::waitKey();
}
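For reference (this is only a restatement of what the code above already computes, not an extra step): each iteration is a Gauss-Newton step on the photometric error. With the per-pixel error e_i and the 6x1 Jacobian J_i accumulated by JacobianAccumulator, the normal equations and the left-multiplicative pose update are

    H = \sum_i J_i J_i^\top, \qquad b = -\sum_i J_i e_i, \qquad H \,\delta\xi = b, \qquad T_{21} \leftarrow \exp(\delta\xi^{\wedge}) \, T_{21},

which is exactly what H.ldlt().solve(b) followed by Sophus::SE3d::exp(update) * T21 implements.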
    	
void JacobianAccumulator::accumulate_jacobian(const cv::Range &range) {

    // parameters
    const int half_patch_size = 1;
    int cnt_good = 0;
    Matrix6d hessian = Matrix6d::Zero();
    Vector6d bias = Vector6d::Zero();
    double cost_tmp = 0;

    for (size_t i = range.start; i < range.end; i++) {

        // compute the projection of the i-th reference pixel into the second image
        Eigen::Vector3d point_ref =
            depth_ref[i] * Eigen::Vector3d((px_ref[i][0] - cx) / fx, (px_ref[i][1] - cy) / fy, 1);
        Eigen::Vector3d point_cur = T21 * point_ref;
        if (point_cur[2] < 0)   // depth invalid
            continue;

        // project onto the normalized plane, then to pixel coordinates in the current image
        float u = fx * point_cur[0] / point_cur[2] + cx, v = fy * point_cur[1] / point_cur[2] + cy;
        if (u < half_patch_size || u > img2.cols - half_patch_size || v < half_patch_size ||
            v > img2.rows - half_patch_size)
            continue;

        projection[i] = Eigen::Vector2d(u, v);
        double X = point_cur[0], Y = point_cur[1], Z = point_cur[2],
            Z2 = Z * Z, Z_inv = 1.0 / Z, Z2_inv = Z_inv * Z_inv;
        cnt_good++;

        // compute the error and jacobian over a small patch around the pixel
        for (int x = -half_patch_size; x <= half_patch_size; x++)
            for (int y = -half_patch_size; y <= half_patch_size; y++) {

                double error = GetPixelValue(img1, px_ref[i][0] + x, px_ref[i][1] + y) -
                               GetPixelValue(img2, u + x, v + y); // intensity difference between the two images
                Matrix26d J_pixel_xi;        // derivative of the pixel coordinates w.r.t. the pose perturbation
                Eigen::Vector2d J_img_pixel; // image gradient of img2 at (u + x, v + y)

                J_pixel_xi(0, 0) = fx * Z_inv;
                J_pixel_xi(0, 1) = 0;
                J_pixel_xi(0, 2) = -fx * X * Z2_inv;
                J_pixel_xi(0, 3) = -fx * X * Y * Z2_inv;
                J_pixel_xi(0, 4) = fx + fx * X * X * Z2_inv;
                J_pixel_xi(0, 5) = -fx * Y * Z_inv;

                J_pixel_xi(1, 0) = 0;
                J_pixel_xi(1, 1) = fy * Z_inv;
                J_pixel_xi(1, 2) = -fy * Y * Z2_inv;
                J_pixel_xi(1, 3) = -fy - fy * Y * Y * Z2_inv;
                J_pixel_xi(1, 4) = fy * X * Y * Z2_inv;
                J_pixel_xi(1, 5) = fy * X * Z_inv;

                // central-difference pixel gradient of img2
                J_img_pixel = Eigen::Vector2d(
                    0.5 * (GetPixelValue(img2, u + 1 + x, v + y) - GetPixelValue(img2, u - 1 + x, v + y)),
                    0.5 * (GetPixelValue(img2, u + x, v + 1 + y) - GetPixelValue(img2, u + x, v - 1 + y))
                );

                // total jacobian
                Vector6d J = -1.0 * (J_img_pixel.transpose() * J_pixel_xi).transpose();

                hessian += J * J.transpose();
                bias += -error * J;
                cost_tmp += error * error;
            }
    }

    if (cnt_good) {
        // add this range's hessian, bias and cost to the shared accumulators
        unique_lock<mutex> lck(hessian_mutex);
        H += hessian;
        b += bias;
        cost += cost_tmp / cnt_good;
    }
}
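The 2x6 matrix J_pixel_xi filled in above is the derivative of the projected pixel (u, v) with respect to a left perturbation \delta\xi of T21 (translation components first, then rotation). Writing (X, Y, Z) for the point in the current camera frame, the entries are the standard projection Jacobian

    \frac{\partial(u, v)}{\partial \delta\xi} =
    \begin{bmatrix}
      \frac{f_x}{Z} & 0 & -\frac{f_x X}{Z^2} & -\frac{f_x X Y}{Z^2} & f_x + \frac{f_x X^2}{Z^2} & -\frac{f_x Y}{Z} \\
      0 & \frac{f_y}{Z} & -\frac{f_y Y}{Z^2} & -f_y - \frac{f_y Y^2}{Z^2} & \frac{f_y X Y}{Z^2} & \frac{f_y X}{Z}
    \end{bmatrix},

and J_img_pixel is the central-difference gradient of img2 at the patch pixel. Because the error is defined as I_1 - I_2, the chain rule carries a minus sign, giving the 6x1 total Jacobian J = -\left(\frac{\partial(u, v)}{\partial \delta\xi}\right)^\top \nabla I_2, which is what Vector6d J = -1.0 * (J_img_pixel.transpose() * J_pixel_xi).transpose(); computes.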
    
void DirectPoseEstimationMultiLayer(
    const cv::Mat &img1,
    const cv::Mat &img2,
    const VecVector2d &px_ref,
    const vector<double> depth_ref,
    Sophus::SE3d &T21) {

    // parameters
    int pyramids = 4;           // number of pyramid levels
    double pyramid_scale = 0.5; // scale between adjacent levels
    double scales[] = {1.0, 0.5, 0.25, 0.125};

    // create the image pyramids
    vector<cv::Mat> pyr1, pyr2;
    for (int i = 0; i < pyramids; i++) {
        if (i == 0) {
            pyr1.push_back(img1);
            pyr2.push_back(img2);
        } else {
            cv::Mat img1_pyr, img2_pyr;
            cv::resize(pyr1[i - 1], img1_pyr,
                       cv::Size(pyr1[i - 1].cols * pyramid_scale, pyr1[i - 1].rows * pyramid_scale));
            cv::resize(pyr2[i - 1], img2_pyr,
                       cv::Size(pyr2[i - 1].cols * pyramid_scale, pyr2[i - 1].rows * pyramid_scale));
            pyr1.push_back(img1_pyr);
            pyr2.push_back(img2_pyr);
        }
    }

    double fxG = fx, fyG = fy, cxG = cx, cyG = cy; // back up the full-resolution intrinsics
    for (int level = pyramids - 1; level >= 0; level--) {
        VecVector2d px_ref_pyr; // keypoints at this pyramid level
        for (auto &px: px_ref) {
            px_ref_pyr.push_back(scales[level] * px); // scale the reference pixels
        }

        // scale fx, fy, cx, cy to this pyramid level
        fx = fxG * scales[level];
        fy = fyG * scales[level];
        cx = cxG * scales[level];
        cy = cyG * scales[level];
        DirectPoseEstimationSingleLayer(pyr1[level], pyr2[level], px_ref_pyr, depth_ref, T21);
    }
}
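Note how the camera intrinsics are scaled along with the images. At the coarsest level used here (scales[3] = 0.125), for example, the effective focal length and principal point become

    fx = 718.856 * 0.125 ≈ 89.86,    cx = 607.1928 * 0.125 ≈ 75.90,

so a reference pixel multiplied by the same scale still projects consistently at that level. On the final pass (level 0, scale 1.0) the globals return to their full-resolution values, which is why no explicit restore from fxG, fyG, cxG, cyG is needed after the loop.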