cam_lidar_calibration Code Walkthrough (Part 3): Point Cloud Reprojection


Contents

1. Main functions

1.1 Computing the mean and standard deviation

1.2 Computing the reprojection error

1.3 Projecting the point cloud onto the image and visualising it

1.4 Deriving the point cloud transform


The assess_node node, src/assess_calibration.cpp

Purpose: read the calibration results, take a set of image/point-cloud samples and reproject them, compute the lidar -> camera transform, and project the point cloud onto the image for visualisation.


1. Main functions

 

1.1 Computing the mean and standard deviation

void get_mean_stdev(std::vector<float>& input_vec, float& mean, float& stdev)
        {
            // std::accumulate takes the begin iterator, the end iterator and an initial value (0.0 here)
            float sum = std::accumulate(std::begin(input_vec), std::end(input_vec), 0.0);
            mean = sum / input_vec.size();

            // Sum the squared deviations of each element from the mean into accum
            float accum = 0.0;
            std::for_each (std::begin(input_vec), std::end(input_vec), [&](const float d) {
                accum += (d - mean) * (d - mean);
            });

            // Divide by (n - 1): sample standard deviation (Bessel's correction)
            stdev = sqrt(accum / (input_vec.size()-1));
        }
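
A minimal standalone sketch of the same routine for quick testing (the surrounding class is omitted; standard headers only):

#include <cmath>
#include <cstdio>
#include <numeric>
#include <vector>

// Same logic as the member function above, free-standing for illustration
void get_mean_stdev(std::vector<float>& input_vec, float& mean, float& stdev)
{
    float sum = std::accumulate(std::begin(input_vec), std::end(input_vec), 0.0f);
    mean = sum / input_vec.size();
    float accum = 0.0f;
    for (float d : input_vec) accum += (d - mean) * (d - mean);
    stdev = std::sqrt(accum / (input_vec.size() - 1));
}

int main()
{
    std::vector<float> errs = {1.0f, 2.0f, 3.0f, 4.0f};
    float m, s;
    get_mean_stdev(errs, m, s);
    std::printf("mean=%.3f stdev=%.3f\n", m, s);  // prints mean=2.500 stdev=1.291
    return 0;
}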

1.2 Computing the reprojection error

float compute_reprojection(OptimisationSample sample, std::vector<cv::Point2d> &cam, std::vector<cv::Point2d> &lidar)
        {
            cv::Mat rvec = cv::Mat_<double>::zeros(3, 1);
            cv::Mat tvec = cv::Mat_<double>::zeros(3, 1);

            std::vector<cv::Point3d> cam_centre_3d;
            std::vector<cv::Point3d> lidar_centre_3d;

            // Need to rotate the lidar points to the camera frame if we want to project it into the image 
            // Cause otherwise, projectPoints function doesn't know how to project points that are in the lidar frame (duh!)
            cv::Point3d lidar_centre_camera_frame = cv::Point3d(rot_trans * sample.lidar_centre);
            cam_centre_3d.push_back(sample.camera_centre);
            lidar_centre_3d.push_back(lidar_centre_camera_frame);

            // ROS_INFO_STREAM("Camera distortion model = " << distortion_model);
            std::vector<cv::Point2d> cam_dist, lidar_dist;

            if (distortion_model == "fisheye")
            {
                cv::fisheye::projectPoints(cam_centre_3d, cam, rvec, tvec, cameramat, distcoeff);
                cv::fisheye::projectPoints(lidar_centre_3d, lidar, rvec, tvec, cameramat, distcoeff);
            }
            else
            {
                cv::projectPoints(cam_centre_3d, rvec, tvec, cameramat, distcoeff, cam);
                cv::projectPoints(lidar_centre_3d, rvec, tvec, cameramat, distcoeff, lidar);
            }

            float pixel_error = cv::norm(cam[0] - lidar[0]);
            return pixel_error;
        }
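
Note that rvec and tvec are left at zero: both centres are already expressed in the camera frame (the lidar centre via rot_trans), so projectPoints only has to apply the intrinsics and distortion. A minimal sketch of the same call pattern, with hypothetical intrinsics for illustration only:

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <cstdio>
#include <vector>

int main()
{
    // Hypothetical pinhole intrinsics and zero (plumb-bob) distortion
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 640,
                                           0, 800, 360,
                                           0,   0,   1);
    cv::Mat D = cv::Mat::zeros(1, 5, CV_64F);

    // A point already in the camera frame -> identity extrinsics
    std::vector<cv::Point3d> pts = { {0.5, 0.2, 4.0} };
    cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64F);
    cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64F);

    std::vector<cv::Point2d> px;
    cv::projectPoints(pts, rvec, tvec, K, D, px);
    std::printf("projected: (%.1f, %.1f)\n", px[0].x, px[0].y);  // (fx*x/z + cx, fy*y/z + cy) = (740.0, 400.0)
    return 0;
}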

1.3 Projecting the point cloud onto the image and visualising it

This function projects the point cloud onto the image and displays the result; it runs when the node is launched with visualise=true. The pose number of the image/point-cloud pair used for the projection (16 in this walkthrough) is set by visualise_pose_num on the last line of the launch file, as sketched below.
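
For orientation, these switches come off the ROS parameter server; a minimal sketch of the usual read pattern (the exact code in the node's constructor may differ):

#include <ros/ros.h>

// Hypothetical sketch: read the visualisation switches from the node's private namespace
void read_visualisation_params(bool& visualise, int& visualise_pose_num)
{
    ros::NodeHandle pnh("~");
    pnh.param<bool>("visualise", visualise, false);               // set via visualise:=true
    pnh.param<int>("visualise_pose_num", visualise_pose_num, 16); // pose index to project
}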

void results_and_visualise () 
        {    
            std::printf("\n---- Calculating average reprojection error on %d samples ---- \n", (int)sample_list.size());
            // Calculate mean and stdev of pixel error across all test samples
            std::vector<float> pix_err, pix_errmm;
            for (int i = 0; i < sample_list.size(); i++)
            {
                std::vector<cv::Point2d> cam, lidar;
                float pe = compute_reprojection(sample_list[i], cam, lidar);
                pix_err.push_back(pe);
                pix_errmm.push_back(pe*sample_list[i].pixeltometre*1000);

                // Get board dimension error - hardcoded board dims for now
                // (sum of absolute differences between the four fitted edge lengths and the nominal board size)
                double w0_diff = std::abs(sample_list[i].widths[0] - board_dimensions.width);
                double w1_diff = std::abs(sample_list[i].widths[1] - board_dimensions.width);
                double h0_diff = std::abs(sample_list[i].heights[0] - board_dimensions.height);
                double h1_diff = std::abs(sample_list[i].heights[1] - board_dimensions.height);
                double be_dim_err = w0_diff + w1_diff + h0_diff + h1_diff;
                std::printf(" %3d/%3d | dist=%6.3fm, dimerr=%8.3fmm | error: %7.3fpix  --> %7.3fmm\n", i+1, (int)sample_list.size(), sample_list[i].distance_from_origin, be_dim_err, pe, pe*sample_list[i].pixeltometre*1000);
            }   
            float mean_pe, stdev_pe, mean_pemm, stdev_pemm;
            get_mean_stdev(pix_err, mean_pe, stdev_pe);
            get_mean_stdev(pix_errmm, mean_pemm, stdev_pemm);
            printf("\nCalibration params (roll,pitch,yaw,x,y,z): %6.4f,%6.4f,%6.4f,%6.4f,%6.4f,%6.4f\n", param_msg->data[0],param_msg->data[1],param_msg->data[2],param_msg->data[3],param_msg->data[4],param_msg->data[5]);
            printf("\nMean reprojection error across  %d samples\n", (int)sample_list.size());
            std::printf("- Error (pix) = %6.3f pix, stdev = %6.3f\n", mean_pe, stdev_pe);
            std::printf("- Error (mm)  = %6.3f mm , stdev = %6.3f\n\n\n", mean_pemm, stdev_pemm);
            
            if (visualise)
            {
                std::string image_path = data_dir + "/images/pose" + std::to_string(visualise_pose_num) + ".png";
                std::string pcd_path = data_dir + "/pcd/pose" + std::to_string(visualise_pose_num) + "_full.pcd";
                
                //  Project the two centres onto an image
                std::vector<cv::Point2d> cam_project, lidar_project;
                cv::Mat image = cv::imread(image_path, cv::IMREAD_COLOR);
                if (image.empty()) {
                    ROS_ERROR_STREAM("Could not read image file, check if image exists at: " << image_path);
                }

                pcl::PointCloud<pcl::PointXYZIR>::Ptr og_cloud(new pcl::PointCloud<pcl::PointXYZIR>);
                pcl::PointCloud<pcl::PointXYZIR>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZIR>);
                if(pcl::io::loadPCDFile<pcl::PointXYZIR> (pcd_path, *og_cloud) == -1)
                {
                    ROS_ERROR_STREAM("Could not read pcd file, check if pcd file exists at: " << pcd_path);
                } else {
                    sensor_msgs::PointCloud2 cloud_msg;
                    pcl::toROSMsg(*og_cloud, cloud_msg);

                    sensor_msgs::PointCloud2 cloud_tf;
                    tf2::doTransform(cloud_msg, cloud_tf, tf_msg);
                    pcl::fromROSMsg(cloud_tf, *cloud);

                    if ( cloud->points.size() ) {
                        
                        for (pcl::PointCloud<pcl::PointXYZIR>::const_iterator it = cloud->begin(); it != cloud->end(); it++) {
                            double tmpxC = it->x / it->z;
                            double tmpyC = it->y / it->z;
                            double tmpzC = it->z;
                            double dis = pow(it->x * it->x + it->y * it->y + it->z * it->z, 0.5);
                            cv::Point2d planepointsC;
                            int range = std::min(round((dis / 30.0) * 49), 49.0);

                            // Applying the distortion
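                            // This is the equidistant (Kannala-Brandt) fisheye model:
                            //   theta_d = theta * (1 + k1*theta^2 + k2*theta^4 + k3*theta^6 + k4*theta^8)
                            // with theta = atan(r); the normalised ray (tmpxC, tmpyC) is scaled by theta_d / r
                            // and then mapped through fx, fy, cx, cy below. Unlike compute_reprojection,
                            // it is applied here regardless of distortion_model.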
                            double r2 = tmpxC * tmpxC + tmpyC * tmpyC;
                            double r1 = pow(r2, 0.5);
                            double a0 = std::atan(r1);
                            double a1;
                            a1 = a0 * (1 + distcoeff.at<double>(0) * pow(a0, 2) + distcoeff.at<double>(1) * pow(a0, 4) +
                                    distcoeff.at<double>(2) * pow(a0, 6) + distcoeff.at<double>(3) * pow(a0, 8));
                            planepointsC.x = (a1 / r1) * tmpxC;
                            planepointsC.y = (a1 / r1) * tmpyC;

                            planepointsC.x = cameramat.at<double>(0, 0) * planepointsC.x + cameramat.at<double>(0, 2);
                            planepointsC.y = cameramat.at<double>(1, 1) * planepointsC.y + cameramat.at<double>(1, 2);

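                            // Keep only points that land inside the image, lie in front of the camera
                            // (tmpzC >= 0), and fall within roughly +/-53 degrees horizontally (|x/z| <= 1.35)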
                            if (planepointsC.y >= 0 and planepointsC.y < height and planepointsC.x >= 0 and planepointsC.x < width and
                                tmpzC >= 0 and std::abs(tmpxC) <= 1.35) {

                                int point_size = 2;
                                cv::circle(image,
                                    cv::Point(planepointsC.x, planepointsC.y), point_size,
                                    CV_RGB(255 * colmap[50-range][0], 255 * colmap[50-range][1], 255 * colmap[50-range][2]), -1);
                            }
                        }
                    }
                }
                ROS_INFO_STREAM("Projecting points onto image for pose #" << (visualise_pose_num));
                compute_reprojection(sample_list[visualise_pose_num-1], cam_project, lidar_project);
                for (auto& point : cam_project)
                {
                    cv::circle(image, point, 15, CV_RGB(0, 255, 0), 2);
                    cv::drawMarker(image, point, CV_RGB(0,255,0), cv::MARKER_CROSS, 25, 2, cv::LINE_8);
                }
                for (auto& point : lidar_project)
                {
                    cv::circle(image, point, 15, CV_RGB(255, 255, 0), 2);
                    cv::drawMarker(image, point, CV_RGB(255,255,0), cv::MARKER_TILTED_CROSS, 20, 2, cv::LINE_8);
                }
                cv::Mat resized_img;
                cv::resize(image, resized_img, cv::Size(), 0.75, 0.75);
                cv::imshow("Reprojection", resized_img);        
                cv::waitKey(0);
            }
        }

1.4 Deriving the point cloud transform

During calibration we solved for the transform from the camera frame to the lidar frame; now that we want to project the point cloud into the image, we need the inverse of that matrix.
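
Concretely, if calibration produced T = (R, t) taking camera-frame points into the lidar frame, its inverse T^-1 = (R^T, -R^T t) takes lidar points into the camera frame. A minimal tf2 sketch with hypothetical numbers:

#include <tf2/LinearMath/Transform.h>
#include <cstdio>

int main()
{
    // Hypothetical calibration result: camera frame -> lidar frame
    tf2::Quaternion q;
    q.setRPY(0.01, -0.02, 1.57);  // roll, pitch, yaw in radians
    tf2::Transform T(q, tf2::Vector3(0.1, 0.0, -0.05));

    // The inverse maps lidar-frame points into the camera frame
    tf2::Transform T_inv = T.inverse();
    tf2::Vector3 p_cam = T_inv * tf2::Vector3(5.0, 1.0, 0.2);
    std::printf("p_cam = (%.3f, %.3f, %.3f)\n", p_cam.x(), p_cam.y(), p_cam.z());
    return 0;
}

The callback below builds exactly this inverse, stores it as tf_msg (consumed by tf2::doTransform in results_and_visualise) and, as roll/pitch/yaw plus a millimetre translation, as rot_trans (consumed by compute_reprojection):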

        void param_msg_callback() {

            // Need to inverse the transforms
            // In calibration, we figured out the transform to make camera into lidar frame (here we do opposite)
            // Here we apply transform to lidar i.e. tf(lidar) (parent) -----> camera (child)
            tf2::Transform transform;
            tf2::Quaternion quat;
            tf2::Vector3 trans;
            quat.setRPY(param_msg->data[0],param_msg->data[1],param_msg->data[2]);
            trans.setX(param_msg->data[3]);
            trans.setY(param_msg->data[4]);
            trans.setZ(param_msg->data[5]);
            transform.setRotation(quat);
            transform.setOrigin(trans);
            // ROS_INFO("Inverting rotation and translation for projecting LiDAR points into camera image");
            tf_msg.transform.rotation.w = transform.inverse().getRotation().w();
            tf_msg.transform.rotation.x = transform.inverse().getRotation().x();
            tf_msg.transform.rotation.y = transform.inverse().getRotation().y();
            tf_msg.transform.rotation.z = transform.inverse().getRotation().z();
            tf_msg.transform.translation.x = transform.inverse().getOrigin().x();
            tf_msg.transform.translation.y = transform.inverse().getOrigin().y();
            tf_msg.transform.translation.z = transform.inverse().getOrigin().z();
            
            // Repack the inverted transform as roll/pitch/yaw plus a translation in millimetres
            double r_val,y_val,p_val;
            geometry_msgs::Quaternion q = tf_msg.transform.rotation;
            tf::Quaternion tfq;
            tf::quaternionMsgToTF(q, tfq);
            tf::Matrix3x3(tfq).getEulerYPR(y_val,p_val,r_val);
            rot_trans.x = tf_msg.transform.translation.x * 1000;   // m -> mm
            rot_trans.y = tf_msg.transform.translation.y * 1000;
            rot_trans.z = tf_msg.transform.translation.z * 1000;
            rot_trans.rot.roll = r_val;
            rot_trans.rot.pitch = p_val;
            rot_trans.rot.yaw = y_val;

            results_and_visualise();
        }
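
One small tidy-up worth noting: transform.inverse() is recomputed above for every field. A minimal sketch that inverts once, assuming the tf2_geometry_msgs conversion header (not shown in the original file):

#include <tf2_geometry_msgs/tf2_geometry_msgs.h>

// Behaviourally equivalent to the field-by-field copy above
tf2::Transform inv = transform.inverse();
tf_msg.transform = tf2::toMsg(inv);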
