Extracting images and point clouds from a rosbag (.bag) file

Extracting images

  • Inspect the .bag file:
rosbag info xxx.bag
  • /home/stone/Desktop/data/2021-06-09-15-16-30.bag
    the path of the bag file to extract from (used in the launch file below)
  • /detector_img
    the image topic whose frames will be extracted (remapped in the launch file below)
<launch>
      <node pkg="rosbag" type="play" name="rosbag" args=" /home/stone/Desktop/data/2021-06-09-15-16-30.bag"/>
      <node name="extract" pkg="image_view" type="extract_images" respawn="false" output="screen" cwd="ROS_HOME">
        <remap from="image" to="/detector_img"/>
      </node>
 </launch>

  • Run:
roscore
roslaunch export.launch
cd ~/.ros
mv ~/.ros/*.jpg /home/stone/Desktop/data/output
                  # destination folder for the extracted images (extract_images writes them to ~/.ros)

Extracting point clouds

rosrun pcl_ros bag_to_pcd /home/stone/Desktop/data/2021-06-09-15-16-30.bag /obstacle_pcl /home/stone/Desktop/data/output-obstacle

  • The point cloud topic is /obstacle_pcl

  • The output directory is /home/stone/Desktop/data/output-obstacle

  • Point cloud visualization:

pcl_viewer  xxx.pcd
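Besides pcl_viewer, the extracted .pcd files can also be loaded and displayed programmatically; a minimal PCL sketch (the file name is only an example):

#include <iostream>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/cloud_viewer.h>

int main() {
  pcl::PointCloud<pcl::PointXYZI>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZI>);
  // Example file name; use one of the .pcd files written by bag_to_pcd.
  if (pcl::io::loadPCDFile<pcl::PointXYZI>("/home/stone/Desktop/data/output-obstacle/xxx.pcd", *cloud) < 0) {
    std::cerr << "failed to load pcd" << std::endl;
    return 1;
  }
  std::cout << "loaded " << cloud->size() << " points" << std::endl;

  pcl::visualization::CloudViewer viewer("pcd viewer");
  viewer.showCloud(cloud);
  while (!viewer.wasStopped()) {}   // block until the window is closed
  return 0;
}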

rosws project

roslaunch ilcc2 pcd2image.launch 
<?xml version="1.0"?>
<launch>

  <node pkg="ilcc2" type="pcd2image" name="pcd2image" output="screen">

     <param name="distance_valid"         value="80" />
     <param name="extrinsic_file"         value="/config/pointgrey1.bin" />

    <param name="yaml_path"  value= "cam_matrix_b.yaml" />

    <param name="image_topic"  value= "/a_image" />
    <param name="lidar_topic"  value= "/livox/lidar" />

  </node>


</launch>

Extracting the point cloud in the rosbag as a PNG depth map

#include <fstream>
#include <iostream>
#include <math.h>
#include <ros/package.h>        /// ros::package::getPath

#include <Eigen/Core>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Image.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <sstream>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/common/transforms.h>          /// pcl::transformPointCloud

#include "ilcc2/ImageCornersEst.h"

using namespace std;
using namespace message_filters;

ImageCornersEst::Ptr image_corners_est(new ImageCornersEst);

double distance_valid;


// void cv::undistort
// (    InputArray  src,                        // distorted source image
//      OutputArray dst,                        // undistorted output image
//      InputArray  cameraMatrix,               // original camera intrinsic matrix
//      InputArray  distCoeffs,                 // distortion coefficients
//      InputArray  newCameraMatrix = noArray() // new camera intrinsic matrix
// )

void processData(cv::Mat image, pcl::PointCloud<pcl::PointXYZI>::Ptr cloud)  
{

  cv::Mat rectifyImage;       // undistorted image
  cv::undistort(image, rectifyImage, image_corners_est->camK, image_corners_est->distort_param, image_corners_est->camK);

  pcl::PointXYZI ptmp;

  /// generate a color for every point
  double inten_low=255, inten_high=0;
  std::vector<double> datas;
  for(unsigned int index=0; index<cloud->size(); index++)  // find the minimum and maximum intensity
  {
      ptmp = cloud->points[index];
      if(inten_low > ptmp.intensity)
          inten_low = ptmp.intensity;
      if(inten_high < ptmp.intensity)
          inten_high = ptmp.intensity;
      datas.push_back(ptmp.intensity);
  }

  inten_low = 0;
  inten_high = 60;


//  std::cout<<"start project "<< cloud->size() << " ,  ";
  int counter = 0;
  for(unsigned int index=0; index<cloud->size(); index++)
  {

    ptmp = cloud->points[index];
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
    // project onto the image plane
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];

      unsigned char r, g, b;

      double h = (ptmp.intensity-inten_low)/(inten_high-inten_low)*255;  // normalize intensity to [0, 255]

      image_corners_est->HSVtoRGB(h, 100, 100, &r, &g, &b);

      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
//      cv::circle(rectifyImage, cv::Point2d(x,y), 0.8, cv::Scalar(r,g,b), 0.8);
//      image.ptr<uchar>(y)[x*3] = 255;
//      image.ptr<uchar>(y)[x*3+1] = 0;
//      image.ptr<uchar>(y)[x*3+2] = 0;

      counter++;
    }
  }
//  std::cout << counter << " points ok\n";
  cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
  std::string package_path = ros::package::getPath("ilcc2");
  cv::imwrite(package_path+"/process_data/"+"abc"+".jpg", rectifyImage);
  cv::waitKey(5);
}


void callback_LidarCam(const sensor_msgs::PointCloud2ConstPtr& msg_pc,
          const sensor_msgs::ImageConstPtr& msg_img)
{


  ROS_INFO_STREAM("Velodyne scan received at " << msg_pc->header.stamp.toSec());
  ROS_INFO_STREAM("image received at " << msg_img->header.stamp.toSec());
  pcl::PointCloud<pcl::PointXYZI> input_cloud;
  pcl::fromROSMsg(*msg_pc, input_cloud);                       // convert to a pcl::PointCloud with fromROSMsg
  cv::Mat img = cv_bridge::toCvCopy(msg_img,"bgr8")->image;    // convert the ROS image to an OpenCV Mat
  processData(img, input_cloud.makeShared());
}






int number=0;
 void chatterCallback(const sensor_msgs::PointCloud2& msg_pc)
 {   
  ROS_INFO("here");
  number=number+1;
  std::string mystring=std::to_string(number);

  
  pcl::PointCloud<pcl::PointXYZI> input_cloud;
  pcl::fromROSMsg(msg_pc, input_cloud);  
  pcl::PointXYZI ptmp;

  /// generate a color for every point
  double inten_low=255, inten_high=0;
  std::vector<double> datas;
  for(unsigned int index=0; index<input_cloud.size(); index++)  // find the minimum and maximum intensity
  {
      ptmp =input_cloud.points[index];
      if(inten_low > ptmp.intensity)
          inten_low = ptmp.intensity;
      if(inten_high < ptmp.intensity)
          inten_high = ptmp.intensity;
      datas.push_back(ptmp.intensity);
  }
 inten_low = 0;
  inten_high = 60;


//  std::cout<<"start project "<< cloud->size() << " ,  ";
  int counter = 0;
  cv::Mat greyimage(360,640,CV_8UC1); 

for (int i = 0; i < greyimage.rows; i++)
	{
		uchar *p = greyimage.ptr<uchar>(i);
		for (int j = 0; j < greyimage.cols; j++)
		{
			
				p[j] = 0;
	
		}
  }


  for(unsigned int index=0; index<input_cloud.size(); index++)
  {

    ptmp = input_cloud.points[index];
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
    // project onto the image plane
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];

      unsigned char r, g, b;

      double h = (ptmp.intensity-inten_low)/(inten_high-inten_low)*255;  // normalize intensity to [0, 255]
      uchar *p = greyimage.ptr<uchar>(y);
      p[x] = h;
//      image_corners_est->HSVtoRGB(h, 100, 100, &r, &g, &b);

//      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
//      cv::circle(rectifyImage, cv::Point2d(x,y), 0.8, cv::Scalar(r,g,b), 0.8);
//      image.ptr<uchar>(y)[x*3] = 255;
//      image.ptr<uchar>(y)[x*3+1] = 0;
//      image.ptr<uchar>(y)[x*3+2] = 0;

      counter++;
    }
  }
//  std::cout << counter << " points ok\n";
  // cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  // cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
  double secs =msg_pc.header.stamp.toSec();
 
   std::string package_path = ros::package::getPath("ilcc2");
  cv::imwrite(package_path+"/process_data/"+std::to_string(secs)+".png", greyimage);
  cv::waitKey(5);
  
}

int main(int argc, char** argv){

  ros::init(argc,argv,"pcd2image");
  ros::NodeHandle nh;
  std::string package_path = ros::package::getPath("ilcc2");
  ROS_INFO("ilcc2 package at %s", package_path.c_str());


  std::string yaml_path, image_topic, lidar_topic;
  std::string extrinsic_file;
  ros::NodeHandle nh_private("~");

   
                                                                        
  nh_private.param<double>("distance_valid", distance_valid, 5);
  nh_private.param<std::string>("extrinsic_file", extrinsic_file, "/process_data/pose.bin");
  nh_private.param<std::string>("yaml_path", yaml_path, "30w.yaml");
  nh_private.param<std::string>("image_topic", image_topic, "/front");
  nh_private.param<std::string>("lidar_topic", lidar_topic, "/velodyne_points");


  std::string extrinsic_path = package_path + extrinsic_file;             // extrinsic file path
  ROS_INFO("load T_lidar2cam from %s", extrinsic_path.c_str());

  image_corners_est = ImageCornersEst::Ptr(new ImageCornersEst(package_path+"/config/"+yaml_path, true) ); // intrinsics (YAML) path
  image_corners_est->txt2extrinsic( extrinsic_path );

  message_filters::Subscriber<sensor_msgs::PointCloud2> cloud_sub(nh, lidar_topic,2);      // point cloud subscriber
  message_filters::Subscriber<sensor_msgs::Image> image_sub(nh, image_topic, 2);           // image subscriber

  ros::Subscriber sub = nh.subscribe(lidar_topic, 1000, chatterCallback);


  // typedef sync_policies::ApproximateTime<sensor_msgs::PointCloud2, sensor_msgs::Image> MySyncPolicy;   // timestamp synchronization policy
  // Synchronizer<MySyncPolicy> sync(MySyncPolicy(10), cloud_sub, image_sub);
  // sync.registerCallback(boost::bind(&callback_LidarCam, _1, _2));                                  // synchronized callback
  // ROS_INFO("waiting for lidar image topic %s %s", lidar_topic.c_str(), image_topic.c_str());

 ros::spin();
  // while (ros::ok())
  // {
    
  //   ros::spin();
  // }
}
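spaceToPlane() belongs to ImageCornersEst, whose source is not shown here; under the usual pinhole model it transforms a lidar point into the camera frame with the loaded extrinsics and projects it with the intrinsics, rejecting points behind the camera or beyond distance_valid. A minimal sketch of that idea (the names R, t, fx, fy, cx, cy are illustrative, not the actual ilcc2 members):

#include <Eigen/Core>

// Illustrative only: a typical implementation of the projection that
// spaceToPlane() is assumed to perform.
bool projectToImage(const Eigen::Vector3d& Pw, const Eigen::Matrix3d& R,
                    const Eigen::Vector3d& t, double fx, double fy,
                    double cx, double cy, double max_range,
                    Eigen::Vector2d& pixel) {
  Eigen::Vector3d Pc = R * Pw + t;              // lidar frame -> camera frame
  if (Pc.z() <= 0.0 || Pc.norm() > max_range)   // behind the camera or too far away
    return false;
  pixel.x() = fx * Pc.x() / Pc.z() + cx;        // pinhole projection
  pixel.y() = fy * Pc.y() / Pc.z() + cy;
  return true;                                  // a real implementation would also reject pixels outside the image
}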




Getting undistorted PNG images from the rosbag

#include <fstream>
#include <iostream>
#include <math.h>
#include <ros/package.h>        /// ros::package::getPath

#include <Eigen/Core>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Image.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <sstream>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/common/transforms.h>          /// pcl::transformPointCloud

#include "ilcc2/ImageCornersEst.h"

using namespace std;
using namespace message_filters;

ImageCornersEst::Ptr image_corners_est(new ImageCornersEst);

double distance_valid;


// void cv::undistort	
// (	InputArray 	src,                        // 原始图像
//         OutputArray 	dst,                        // 矫正图像
//         InputArray 	cameraMatrix,               // 原相机内参矩阵
//         InputArray 	distCoeffs,                 // 相机畸变参数
//         InputArray 	newCameraMatrix = noArray() // 新相机内参矩阵
// )	

void processData(cv::Mat image, pcl::PointCloud<pcl::PointXYZI>::Ptr cloud)  
{

  cv::Mat rectifyImage;       //矫正的图像
  cv::undistort(image, rectifyImage, image_corners_est->camK, image_corners_est->distort_param, image_corners_est->camK);

  pcl::PointXYZI ptmp;

  /// 生成每个点的颜色
  double inten_low=255, inten_high=0;    
  std::vector<double> datas;            
  for(unsigned int index=0; index<cloud->size(); index++)  //找深度的最大值和最小值
  {
      ptmp = cloud->points[index];
      if(inten_low > ptmp.intensity)
          inten_low = ptmp.intensity;
      if(inten_high < ptmp.intensity)
          inten_high = ptmp.intensity;
      datas.push_back(ptmp.intensity);
  }

  inten_low = 0;
  inten_high = 60;


//  std::cout<<"start project "<< cloud->size() << " ,  ";
  int counter = 0;
  for(unsigned int index=0; index<cloud->size(); index++)
  {

    ptmp = cloud->points[index];
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
       //转换平面
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];

      unsigned char r, g, b;

      double h = (ptmp.intensity-inten_low)/(inten_high-inten_low)*255;  //对深度进行归一化

      image_corners_est->HSVtoRGB(h, 100, 100, &r, &g, &b);

      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
//      cv::circle(rectifyImage, cv::Point2d(x,y), 0.8, cv::Scalar(r,g,b), 0.8);
//      image.ptr<uchar>(y)[x*3] = 255;
//      image.ptr<uchar>(y)[x*3+1] = 0;
//      image.ptr<uchar>(y)[x*3+2] = 0;

      counter++;
    }
  }
//  std::cout << counter << " points ok\n";
  cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
  std::string package_path = ros::package::getPath("ilcc2");
  cv::imwrite(package_path+"/process_data/"+"abc"+".jpg", rectifyImage);
  cv::waitKey(5);
}


void callback_LidarCam(const sensor_msgs::PointCloud2ConstPtr& msg_pc,
          const sensor_msgs::ImageConstPtr& msg_img)
{


  ROS_INFO_STREAM("Velodyne scan received at " << msg_pc->header.stamp.toSec());
  ROS_INFO_STREAM("image received at " << msg_img->header.stamp.toSec());
  pcl::PointCloud<pcl::PointXYZI> input_cloud;
  pcl::fromROSMsg(*msg_pc, input_cloud);                       // convert to a pcl::PointCloud with fromROSMsg
  cv::Mat img = cv_bridge::toCvCopy(msg_img,"bgr8")->image;    // convert the ROS image to an OpenCV Mat
  processData(img, input_cloud.makeShared());



}


int number=0;

 void camCallback(const sensor_msgs::Image& msg_img)
 {   
   ROS_INFO_STREAM("image received at " << msg_img.header.stamp.toSec());
  cv::Mat img = cv_bridge::toCvCopy(msg_img,"bgr8")->image; 

  cv::Mat rectifyImage;       //矫正的图像
  cv::resize(img, img, cv::Size(640, 360));
  cv::undistort(img, rectifyImage, image_corners_est->camK, image_corners_est->distort_param, image_corners_est->camK);
 

  double secs =msg_img.header.stamp.toSec(); 
  std::string package_path = ros::package::getPath("ilcc2");
  cv::imwrite(package_path+"/process_image/"+std::to_string(secs)+".png", img);
  cv::waitKey(5);

   
 


 }






 void lidarCallback(const sensor_msgs::PointCloud2& msg_pc)
 {   
  ROS_INFO("here");
  number=number+1;
  std::string mystring=std::to_string(number);

  
  pcl::PointCloud<pcl::PointXYZI> input_cloud;
  pcl::fromROSMsg(msg_pc, input_cloud);  
  pcl::PointXYZI ptmp;

  /// 生成每个点的颜色
  double inten_low=255, inten_high=0;    
  std::vector<double> datas;            
  for(unsigned int index=0; index<input_cloud.size(); index++)  //找深度的最大值和最小值
  {
      ptmp =input_cloud.points[index];
      if(inten_low > ptmp.intensity)
          inten_low = ptmp.intensity;
      if(inten_high < ptmp.intensity)
          inten_high = ptmp.intensity;
      datas.push_back(ptmp.intensity);
  }
 inten_low = 0;
  inten_high = 60;


//  std::cout<<"start project "<< cloud->size() << " ,  ";
  int counter = 0;
  cv::Mat greyimage(360,640,CV_8UC1); 

for (int i = 0; i < greyimage.rows; i++)
	{
		uchar *p = greyimage.ptr<uchar>(i);
		for (int j = 0; j < greyimage.cols; j++)
		{
			
				p[j] = 0;
	
		}
  }


  for(unsigned int index=0; index<input_cloud.size(); index++)
  {

    ptmp = input_cloud.points[index];
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
       //转换平面
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];

      unsigned char r, g, b;

      double h = (ptmp.intensity-inten_low)/(inten_high-inten_low)*255;  //对深度进行归一化
      uchar *p = greyimage.ptr<uchar>(y);
      p[x] = h;
//      image_corners_est->HSVtoRGB(h, 100, 100, &r, &g, &b);

//      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
//      cv::circle(rectifyImage, cv::Point2d(x,y), 0.8, cv::Scalar(r,g,b), 0.8);
//      image.ptr<uchar>(y)[x*3] = 255;
//      image.ptr<uchar>(y)[x*3+1] = 0;
//      image.ptr<uchar>(y)[x*3+2] = 0;

      counter++;
    }
  }
//  std::cout << counter << " points ok\n";
  // cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  // cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
  double secs =msg_pc.header.stamp.toSec();
 
   std::string package_path = ros::package::getPath("ilcc2");
  cv::imwrite(package_path+"/process_data/"+std::to_string(secs)+".png", greyimage);
  cv::waitKey(5);
  
}

int main(int argc, char** argv){

  ros::init(argc,argv,"pcd2image");
  ros::NodeHandle nh;
  std::string package_path = ros::package::getPath("ilcc2");
  ROS_INFO("ilcc2 package at %s", package_path.c_str());


  std::string yaml_path, image_topic, lidar_topic;
  std::string extrinsic_file;
  ros::NodeHandle nh_private("~");

   
                                                                        
  nh_private.param<double>("distance_valid", distance_valid, 5);
  nh_private.param<std::string>("extrinsic_file", extrinsic_file, "/process_data/pose.bin");
  nh_private.param<std::string>("yaml_path", yaml_path, "30w.yaml");
  nh_private.param<std::string>("image_topic", image_topic, "/front");
  nh_private.param<std::string>("lidar_topic", lidar_topic, "/velodyne_points");


  std::string extrinsic_path = package_path + extrinsic_file;             //外参路径
  ROS_INFO("load T_lidar2cam from %s", extrinsic_path.c_str());             

  image_corners_est = ImageCornersEst::Ptr(new ImageCornersEst(package_path+"/config/"+yaml_path, true) ); //内参路径
  image_corners_est->txt2extrinsic( extrinsic_path );

  message_filters::Subscriber<sensor_msgs::PointCloud2> cloud_sub(nh, lidar_topic,2);      //点云接收
  message_filters::Subscriber<sensor_msgs::Image> image_sub(nh, image_topic, 2);            // 图像接收

  ros::Subscriber sub = nh.subscribe(image_topic, 1000, camCallback);


  // typedef sync_policies::ApproximateTime<sensor_msgs::PointCloud2, sensor_msgs::Image> MySyncPolicy;   //时间戳同步
  // Synchronizer<MySyncPolicy> sync(MySyncPolicy(10), cloud_sub, image_sub);
  // sync.registerCallback(boost::bind(&callback_LidarCam, _1, _2));                                  //回调函数
  // ROS_INFO("waiting for lidar image topic %s %s", lidar_topic.c_str(), image_topic.c_str());       

 ros::spin();
  // while (ros::ok())
  // {
    
  //   ros::spin();
  // }
}
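camCallback above names every rectified frame by its ROS time stamp, just as the depth maps from the previous node are named by the lidar stamp, so the two sets can be paired offline by nearest time stamp. A small hypothetical helper (not part of ilcc2), assuming both stamp lists are sorted in increasing order:

#include <cmath>
#include <utility>
#include <vector>

// Hypothetical offline helper: pair every lidar depth-map stamp with the
// closest camera-image stamp, keeping only pairs closer than max_dt seconds.
std::vector<std::pair<double, double>> matchByStamp(const std::vector<double>& lidar_stamps,
                                                    const std::vector<double>& image_stamps,
                                                    double max_dt = 0.05) {
  std::vector<std::pair<double, double>> pairs;
  if (image_stamps.empty()) return pairs;
  size_t j = 0;
  for (double t : lidar_stamps) {
    while (j + 1 < image_stamps.size() &&
           std::fabs(image_stamps[j + 1] - t) < std::fabs(image_stamps[j] - t))
      ++j;                                   // advance to the closest image stamp
    if (std::fabs(image_stamps[j] - t) <= max_dt)
      pairs.emplace_back(t, image_stamps[j]);
  }
  return pairs;
}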




Getting the point cloud and the image from the rosbag at the same time

#include <fstream>
#include <iostream>
#include <math.h>
#include <ros/package.h>        /// ros::package::getPath

#include <Eigen/Core>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Image.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <sstream>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/common/transforms.h>          /// pcl::transformPointCloud

#include "ilcc2/ImageCornersEst.h"

using namespace std;
using namespace message_filters;

ImageCornersEst::Ptr image_corners_est(new ImageCornersEst);

double distance_valid;


// void cv::undistort	
// (	InputArray 	src,                        // 原始图像
//         OutputArray 	dst,                        // 矫正图像
//         InputArray 	cameraMatrix,               // 原相机内参矩阵
//         InputArray 	distCoeffs,                 // 相机畸变参数
//         InputArray 	newCameraMatrix = noArray() // 新相机内参矩阵
// )	

void processData(cv::Mat image, pcl::PointCloud<pcl::PointXYZI>::Ptr cloud)  
{

  cv::Mat rectifyImage;       //矫正的图像
  cv::undistort(image, rectifyImage, image_corners_est->camK, image_corners_est->distort_param, image_corners_est->camK);

  pcl::PointXYZI ptmp;

  /// 生成每个点的颜色
  double inten_low=255, inten_high=0;    
  std::vector<double> datas;            
  for(unsigned int index=0; index<cloud->size(); index++)  //找深度的最大值和最小值
  {
      ptmp = cloud->points[index];
      if(inten_low > ptmp.intensity)
          inten_low = ptmp.intensity;
      if(inten_high < ptmp.intensity)
          inten_high = ptmp.intensity;
      datas.push_back(ptmp.intensity);
  }

  inten_low = 0;
  inten_high = 60;


//  std::cout<<"start project "<< cloud->size() << " ,  ";
  int counter = 0;
  for(unsigned int index=0; index<cloud->size(); index++)
  {

    ptmp = cloud->points[index];
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
       //转换平面
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];

      unsigned char r, g, b;

      double h = (ptmp.intensity-inten_low)/(inten_high-inten_low)*255;  //对深度进行归一化

      image_corners_est->HSVtoRGB(h, 100, 100, &r, &g, &b);

      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
//      cv::circle(rectifyImage, cv::Point2d(x,y), 0.8, cv::Scalar(r,g,b), 0.8);
//      image.ptr<uchar>(y)[x*3] = 255;
//      image.ptr<uchar>(y)[x*3+1] = 0;
//      image.ptr<uchar>(y)[x*3+2] = 0;

      counter++;
    }
  }
//  std::cout << counter << " points ok\n";
  cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
  std::string package_path = ros::package::getPath("ilcc2");
  cv::imwrite(package_path+"/process_data/"+"abc"+".jpg", rectifyImage);
  cv::waitKey(5);
}


void callback_LidarCam(const sensor_msgs::PointCloud2ConstPtr& msg_pc,
          const sensor_msgs::ImageConstPtr& msg_img)
{


  ROS_INFO_STREAM("Velodyne scan received at " << msg_pc->header.stamp.toSec());
  ROS_INFO_STREAM("image received at " << msg_img->header.stamp.toSec());
  pcl::PointCloud<pcl::PointXYZI> input_cloud;
  pcl::fromROSMsg(*msg_pc, input_cloud);                       // convert to a pcl::PointCloud with fromROSMsg
  cv::Mat img = cv_bridge::toCvCopy(msg_img,"bgr8")->image;    // convert the ROS image to an OpenCV Mat
  processData(img, input_cloud.makeShared());



}


int number=0;

 void camCallback(const sensor_msgs::Image& msg_img)
 {   
   ROS_INFO_STREAM("image received at " << msg_img.header.stamp.toSec());
  cv::Mat img = cv_bridge::toCvCopy(msg_img,"bgr8")->image; 

  cv::Mat rectifyImage;       //矫正的图像
  cv::resize(img, img, cv::Size(640, 360));
  cv::undistort(img, rectifyImage, image_corners_est->camK, image_corners_est->distort_param, image_corners_est->camK);
 

  double secs =msg_img.header.stamp.toSec(); 
  std::string package_path = ros::package::getPath("ilcc2");
  cv::imwrite(package_path+"/process_image/"+std::to_string(secs)+".png", img);
  cv::waitKey(5);

   

 }






 void lidarCallback(const sensor_msgs::PointCloud2& msg_pc)
 {   
  ROS_INFO_STREAM("lidar received at " << msg_pc.header.stamp.toSec());

  number=number+1;
  std::string mystring=std::to_string(number);

  
  pcl::PointCloud<pcl::PointXYZI> input_cloud;
  pcl::fromROSMsg(msg_pc, input_cloud);  
  pcl::PointXYZI ptmp;

  /// 生成每个点的颜色
  double inten_low=255, inten_high=0;    
  std::vector<double> datas;            
  for(unsigned int index=0; index<input_cloud.size(); index++)  //找深度的最大值和最小值
  {
      ptmp =input_cloud.points[index];
      if(inten_low > ptmp.intensity)
          inten_low = ptmp.intensity;
      if(inten_high < ptmp.intensity)
          inten_high = ptmp.intensity;
      datas.push_back(ptmp.intensity);
  }
 inten_low = 0;
  inten_high = 60;


//  std::cout<<"start project "<< cloud->size() << " ,  ";
  int counter = 0;
  cv::Mat greyimage(360,640,CV_8UC1); 

for (int i = 0; i < greyimage.rows; i++)
	{
		uchar *p = greyimage.ptr<uchar>(i);
		for (int j = 0; j < greyimage.cols; j++)
		{
			
				p[j] = 0;
	
		}
  }


  for(unsigned int index=0; index<input_cloud.size(); index++)
  {

    ptmp = input_cloud.points[index];
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
       //转换平面
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];

      unsigned char r, g, b;

      double h = (ptmp.intensity-inten_low)/(inten_high-inten_low)*255;  //对深度进行归一化
      uchar *p = greyimage.ptr<uchar>(y);
      p[x] = h;
//      image_corners_est->HSVtoRGB(h, 100, 100, &r, &g, &b);

//      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
//      cv::circle(rectifyImage, cv::Point2d(x,y), 0.8, cv::Scalar(r,g,b), 0.8);
//      image.ptr<uchar>(y)[x*3] = 255;
//      image.ptr<uchar>(y)[x*3+1] = 0;
//      image.ptr<uchar>(y)[x*3+2] = 0;

      counter++;
    }
  }
//  std::cout << counter << " points ok\n";
  // cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  // cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
  double secs =msg_pc.header.stamp.toSec();
 
   std::string package_path = ros::package::getPath("ilcc2");
  cv::imwrite(package_path+"/process_data/"+std::to_string(secs)+".png", greyimage);
  cv::waitKey(5);
  
}

int main(int argc, char** argv){

  ros::init(argc,argv,"pcd2image");
  ros::NodeHandle nh;
  std::string package_path = ros::package::getPath("ilcc2");
  ROS_INFO("ilcc2 package at %s", package_path.c_str());


  std::string yaml_path, image_topic, lidar_topic;
  std::string extrinsic_file;
  ros::NodeHandle nh_private("~");

   
                                                                        
  nh_private.param<double>("distance_valid", distance_valid, 5);
  nh_private.param<std::string>("extrinsic_file", extrinsic_file, "/process_data/pose.bin");
  nh_private.param<std::string>("yaml_path", yaml_path, "30w.yaml");
  nh_private.param<std::string>("image_topic", image_topic, "/front");
  nh_private.param<std::string>("lidar_topic", lidar_topic, "/velodyne_points");


  std::string extrinsic_path = package_path + extrinsic_file;             //外参路径
  ROS_INFO("load T_lidar2cam from %s", extrinsic_path.c_str());             

  image_corners_est = ImageCornersEst::Ptr(new ImageCornersEst(package_path+"/config/"+yaml_path, true) ); //内参路径
  image_corners_est->txt2extrinsic( extrinsic_path );

  message_filters::Subscriber<sensor_msgs::PointCloud2> cloud_sub(nh, lidar_topic,2);      //点云接收
  message_filters::Subscriber<sensor_msgs::Image> image_sub(nh, image_topic, 2);            // 图像接收

  ros::Subscriber sub = nh.subscribe(image_topic, 1000, camCallback);

  ros::Subscriber asub = nh.subscribe(lidar_topic, 1000, lidarCallback);
  // typedef sync_policies::ApproximateTime<sensor_msgs::PointCloud2, sensor_msgs::Image> MySyncPolicy;   //时间戳同步
  // Synchronizer<MySyncPolicy> sync(MySyncPolicy(10), cloud_sub, image_sub);
  // sync.registerCallback(boost::bind(&callback_LidarCam, _1, _2));                                  //回调函数
  // ROS_INFO("waiting for lidar image topic %s %s", lidar_topic.c_str(), image_topic.c_str());       

 ros::spin();
  // while (ros::ok())
  // {
    
  //   ros::spin();
  // }
}
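In this version the image and lidar callbacks run independently and the saved files are paired by their time stamps afterwards. If hard pairing inside the node is preferred, the ApproximateTime policy that is commented out in main() can be enabled instead; a sketch of that variant (placed inside main(), reusing nh, lidar_topic and image_topic from above):

// Sketch: time-synchronized variant using the commented-out ApproximateTime policy,
// so that callback_LidarCam receives one time-aligned (cloud, image) pair per call.
typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::PointCloud2,
                                                         sensor_msgs::Image> MySyncPolicy;

message_filters::Subscriber<sensor_msgs::PointCloud2> cloud_sub(nh, lidar_topic, 2);
message_filters::Subscriber<sensor_msgs::Image> image_sub(nh, image_topic, 2);
message_filters::Synchronizer<MySyncPolicy> sync(MySyncPolicy(10), cloud_sub, image_sub);
sync.registerCallback(boost::bind(&callback_LidarCam, _1, _2));
ros::spin();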


Overlaying the depth map on the RGB image for display

#include <fstream>
#include <iostream>
#include <math.h>
#include <ros/package.h>        /// ros::package::getPath

#include <Eigen/Core>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Image.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <sstream>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/common/transforms.h>          /// pcl::transformPointCloud

#include "ilcc2/ImageCornersEst.h"

using namespace std;
using namespace message_filters;

ImageCornersEst::Ptr image_corners_est(new ImageCornersEst);

double distance_valid;









void processData(cv::Mat image, pcl::PointCloud<pcl::PointXYZI>::Ptr cloud)  
{

  cv::Mat rectifyImage; 
  cv::resize(image, image, cv::Size(640, 360));      //矫正的图像
  cv::undistort(image, rectifyImage, image_corners_est->camK, image_corners_est->distort_param, image_corners_est->camK);

  pcl::PointXYZI ptmp;

  /// 生成每个点的颜色
  double inten_low=255, inten_high=0;    
  std::vector<double> datas;            
  for(unsigned int index=0; index<cloud->size(); index++)  //找深度的最大值和最小值
  {
      ptmp = cloud->points[index];
      if(inten_low > ptmp.intensity)
          inten_low = ptmp.intensity;
      if(inten_high < ptmp.intensity)
          inten_high = ptmp.intensity;
      datas.push_back(ptmp.intensity);
  }
  ROS_INFO("max distance: %d", int(inten_high));
  inten_low = 0;
  inten_high = 60;

  int counter = 0;
  for(unsigned int index=0; index<cloud->size(); index++)
  {

    ptmp = cloud->points[index];
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
       //转换平面
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];
      unsigned char r, g, b;

      double h = (ptmp.intensity-inten_low)/( distance_valid -inten_low)*255;  //对深度进行归一化
      image_corners_est->HSVtoRGB(h, 100, 100, &r, &g, &b);
      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
      counter++;
    }
  }
//  std::cout << counter << " points ok\n";
  cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
  // std::string package_path = ros::package::getPath("ilcc2");
  // cv::imwrite(package_path+"/process_data/"+"abc"+".jpg", rectifyImage);
  cv::waitKey(5);
}






std::deque<cv::Mat> img_buffer;
std::deque<pcl::PointCloud<pcl::PointXYZI>> cloud_buffer;


void ImageHandler(const sensor_msgs::Image::ConstPtr& img_msg) {
  cv::Mat img = cv_bridge::toCvCopy(img_msg)->image;
  if (!img.empty()) {
    img_buffer.push_back(img);
    if (img_buffer.size() > 3) {
      img_buffer.pop_front();
    }
  }
}

void LiDARHandler(const sensor_msgs::PointCloud2::ConstPtr& lidar_msg) {
  pcl::PointCloud<pcl::PointXYZI> cloud;
  pcl::fromROSMsg(*lidar_msg, cloud);

  cloud_buffer.push_back(cloud);
  if (cloud_buffer.size() > 3) {
    cloud_buffer.pop_front();
  }
}




int main(int argc, char** argv){

  ros::init(argc,argv,"pcd2image");
  ros::NodeHandle nh;
  std::string package_path = ros::package::getPath("ilcc2");
  ROS_INFO("ilcc2 package at %s", package_path.c_str());


  std::string yaml_path, image_topic, lidar_topic;
  std::string extrinsic_file;
  ros::NodeHandle nh_private("~");

   
                                                                        
  nh_private.param<double>("distance_valid", distance_valid, 5);
  nh_private.param<std::string>("extrinsic_file", extrinsic_file, "/process_data/pose.bin");
  nh_private.param<std::string>("yaml_path", yaml_path, "30w.yaml");
  nh_private.param<std::string>("image_topic", image_topic, "/front");
  nh_private.param<std::string>("lidar_topic", lidar_topic, "/velodyne_points");


  std::string extrinsic_path = package_path + extrinsic_file;             //外参路径
  ROS_INFO("load T_lidar2cam from %s", extrinsic_path.c_str());             

  image_corners_est = ImageCornersEst::Ptr(new ImageCornersEst(package_path+"/config/"+yaml_path, true) ); //内参路径
  image_corners_est->txt2extrinsic( extrinsic_path );


  ros::Subscriber sub_livox =nh.subscribe<sensor_msgs::PointCloud2>(lidar_topic, 10, LiDARHandler);
  ros::Subscriber sub_image = nh.subscribe<sensor_msgs::Image>(image_topic, 20, ImageHandler);
 
  ros::Rate loop_rate(10);



  while (ros::ok()) {
    if (img_buffer.size() > 0 && cloud_buffer.size() > 0) {
      cv::Mat tmp_image = img_buffer.back();
      pcl::PointCloud<pcl::PointXYZI> tmp_cloud = cloud_buffer.back();
      img_buffer.clear();

      processData(tmp_image, tmp_cloud.makeShared());

    }

    ros::spinOnce();
    loop_rate.sleep();
  }


  return 0;

}
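The node above overlays the lidar points by drawing small colored circles directly onto the rectified image. If a semi-transparent overlay is preferred, the points can instead be rendered into a separate layer and blended with cv::addWeighted; a minimal sketch (the helper and the pre-rendered overlay image are assumptions, not part of ilcc2):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Illustrative only: blend a rendered lidar layer over the rectified RGB image.
// 'overlay' is assumed to be a CV_8UC3 image of the same size in which the
// projected points have already been drawn on a black background.
cv::Mat blendOverlay(const cv::Mat& rectified, const cv::Mat& overlay,
                     double alpha = 0.6) {
  cv::Mat blended;
  cv::addWeighted(rectified, 1.0, overlay, alpha, 0.0, blended);
  return blended;
}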

Saving the corresponding depth map and RGB image while fusing them

#include <fstream>
#include <iostream>
#include <math.h>
#include <ros/package.h>        /// ros::package::getPath

#include <Eigen/Core>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Image.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <sstream>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/common/transforms.h>          /// pcl::transformPointCloud

#include "ilcc2/ImageCornersEst.h"

using namespace std;
using namespace message_filters;

ImageCornersEst::Ptr image_corners_est(new ImageCornersEst);

double distance_valid;

int num=0;

void processData(cv::Mat image, pcl::PointCloud<pcl::PointXYZI>::Ptr cloud)  
{
  num=num+1;
  std::string package_path = ros::package::getPath("ilcc2");
  cv::Mat rectifyImage; 
  cv::resize(image, image, cv::Size(640, 360));      //矫正的图像
  cv::undistort(image, rectifyImage, image_corners_est->camK, image_corners_est->distort_param, image_corners_est->camK);
  cv::imwrite(package_path+"/data/image/"+std::to_string(num)+".jpg", rectifyImage);
  
  cv::Mat depthimage(360,640,CV_8UC1); 

  for (int i = 0; i < depthimage.rows; i++)
    {
      uchar *p = depthimage.ptr<uchar>(i);
      for (int j = 0; j < depthimage.cols; j++)
      {
        
          p[j] = 0;
    
      }
    }
  pcl::PointXYZI ptmp;

  /// generate a color for every point
  double inten_low=255, inten_high=0;
  std::vector<double> datas;
  for(unsigned int index=0; index<cloud->size(); index++)  //找深度的最大值和最小值
  {
      ptmp = cloud->points[index];
      if(inten_low > ptmp.intensity)
          inten_low = ptmp.intensity;
      if(inten_high < ptmp.intensity)
          inten_high = ptmp.intensity;
      datas.push_back(ptmp.intensity);
  }
  ROS_INFO("max distance: %d", int(inten_high));
  inten_low = 0;
  inten_high = 60;

  int counter = 0;
  for(unsigned int index=0; index<cloud->size(); index++)
  {

    ptmp = cloud->points[index];
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
       //转换平面
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];
      unsigned char r, g, b;

      double h = (ptmp.intensity-inten_low)/(inten_high -inten_low)*255;  //对深度进行归一化

      uchar *p = depthimage.ptr<uchar>(y);
      p[x] = h;

      image_corners_est->HSVtoRGB(h, 100, 100, &r, &g, &b);
      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
      counter++;
    }
  }
//  std::cout << counter << " points ok\n";






  cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
  cv::imwrite(package_path+"/data/lidar_image/"+std::to_string(num)+".jpg", rectifyImage);
  cv::imwrite(package_path+"/data/depthmap/"+std::to_string(num)+".png", depthimage);
  cv::waitKey(5);
}




std::deque<cv::Mat> img_buffer;
std::deque<pcl::PointCloud<pcl::PointXYZI>> cloud_buffer;


void ImageHandler(const sensor_msgs::Image::ConstPtr& img_msg) {
  cv::Mat img = cv_bridge::toCvCopy(img_msg)->image;
  if (!img.empty()) {
    img_buffer.push_back(img);
    if (img_buffer.size() > 3) {
      img_buffer.pop_front();
    }
  }
}

void LiDARHandler(const sensor_msgs::PointCloud2::ConstPtr& lidar_msg) {
  pcl::PointCloud<pcl::PointXYZI> cloud;
  pcl::fromROSMsg(*lidar_msg, cloud);

  cloud_buffer.push_back(cloud);
  if (cloud_buffer.size() > 3) {
    cloud_buffer.pop_front();
  }
}




int main(int argc, char** argv){

  ros::init(argc,argv,"pcd2image");
  ros::NodeHandle nh;
  std::string package_path = ros::package::getPath("ilcc2");
  ROS_INFO("ilcc2 package at %s", package_path.c_str());


  std::string yaml_path, image_topic, lidar_topic;
  std::string extrinsic_file;
  ros::NodeHandle nh_private("~");

   
                                                                        
  nh_private.param<double>("distance_valid", distance_valid, 5);
  nh_private.param<std::string>("extrinsic_file", extrinsic_file, "/process_data/pose.bin");
  nh_private.param<std::string>("yaml_path", yaml_path, "30w.yaml");
  nh_private.param<std::string>("image_topic", image_topic, "/front");
  nh_private.param<std::string>("lidar_topic", lidar_topic, "/velodyne_points");


  std::string extrinsic_path = package_path + extrinsic_file;             //外参路径
  ROS_INFO("load T_lidar2cam from %s", extrinsic_path.c_str());             

  image_corners_est = ImageCornersEst::Ptr(new ImageCornersEst(package_path+"/config/"+yaml_path, true) ); //内参路径
  image_corners_est->txt2extrinsic( extrinsic_path );


  ros::Subscriber sub_livox =nh.subscribe<sensor_msgs::PointCloud2>(lidar_topic, 10, LiDARHandler);
  ros::Subscriber sub_image = nh.subscribe<sensor_msgs::Image>(image_topic, 20, ImageHandler);
 
  ros::Rate loop_rate(10);



  while (ros::ok()) {
    if (img_buffer.size() > 0 && cloud_buffer.size() > 0) {
      cv::Mat tmp_image = img_buffer.back();
      pcl::PointCloud<pcl::PointXYZI> tmp_cloud = cloud_buffer.back();
      img_buffer.clear();

      processData(tmp_image, tmp_cloud.makeShared());

    }

    ros::spinOnce();
    loop_rate.sleep();
  }


  return 0;





}




Changing the depth map to 16-bit

#include <fstream>
#include <iostream>
#include <math.h>
#include <ros/package.h>        /// ros::package::getPath

#include <Eigen/Core>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Image.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <sstream>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/common/transforms.h>          /// pcl::transformPointCloud

#include "ilcc2/ImageCornersEst.h"

using namespace std;
using namespace message_filters;

ImageCornersEst::Ptr image_corners_est(new ImageCornersEst);

double distance_valid;

int num=0;

void processData(cv::Mat image, pcl::PointCloud<pcl::PointXYZI>::Ptr cloud)  
{
  num=num+1;
  std::string package_path = ros::package::getPath("ilcc2");
  cv::Mat rectifyImage; 
   cv::resize(image, image, cv::Size( 640, 360));     //矫正的图像
  cv::undistort(image, rectifyImage, image_corners_est->camK, image_corners_est->distort_param, image_corners_est->camK);
  cv::resize(rectifyImage, rectifyImage, cv::Size( 640, 360)); 

  cv::Mat saverectifyImage;
  saverectifyImage= rectifyImage(cv::Rect(0,4,640,352)); 
  cv::imwrite(package_path+"/data/image/"+std::to_string(num)+".png", saverectifyImage);
  
  cv::Mat depthimage(360,640,CV_16UC1); 
 
  for (int i = 0; i < depthimage.rows; i++)
    {
      unsigned short *p = depthimage.ptr<unsigned short>(i);
      for (int j = 0; j < depthimage.cols; j++)
      {
        
          p[j] = 0;
    
      }
    }
  pcl::PointXYZI ptmp;

  /// 生成每个点的颜色
  double inten_low=255, inten_high=0;    
  std::vector<double> datas;            
  for(unsigned int index=0; index<cloud->size(); index++)  //找深度的最大值和最小值
  {
      ptmp = cloud->points[index];
      if(inten_low > ptmp.intensity)
          inten_low = ptmp.intensity;
      if(inten_high < ptmp.intensity)
          inten_high = ptmp.intensity;
      datas.push_back(ptmp.intensity);
  }
  ROS_INFO("max distance: %d", int(inten_high));
  inten_low = 0;
  inten_high = 260;

  int counter = 0;
  for(unsigned int index=0; index<cloud->size(); index++)
  {

    ptmp = cloud->points[index];
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
       //转换平面
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];
      unsigned char r, g, b;

      double h = (ptmp.intensity-inten_low)/(inten_high -inten_low)*65535;  //对深度进行归一化

      unsigned short *p = depthimage.ptr<unsigned short>(y);
      p[x] = h;

      image_corners_est->HSVtoRGB(h/256, 100, 100, &r, &g, &b);
      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
      counter++;
    }
  }
//  std::cout << counter << " points ok\n";






  //cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
 
  cv::Mat savelidar_image;
  savelidar_image= rectifyImage(cv::Rect(0,4,640,352)); 
  cv::imwrite(package_path+"/data/lidar_image/"+std::to_string(num)+".png", savelidar_image);
  vector<int>  compression_params;
  compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
  compression_params.push_back(0);

  cv::Mat savedepthimage;
  savedepthimage= depthimage(cv::Rect(0,4,640,352)); 
  cv::imwrite(package_path+"/data/depthmap/"+std::to_string(num)+".png", savedepthimage,compression_params);
  cv::waitKey(5);
}




std::deque<cv::Mat> img_buffer;
std::deque<pcl::PointCloud<pcl::PointXYZI>> cloud_buffer;


void ImageHandler(const sensor_msgs::Image::ConstPtr& img_msg) {
  cv::Mat img = cv_bridge::toCvCopy(img_msg)->image;
  if (!img.empty()) {
    img_buffer.push_back(img);
    if (img_buffer.size() > 3) {
      img_buffer.pop_front();
    }
  }
}

void LiDARHandler(const sensor_msgs::PointCloud2::ConstPtr& lidar_msg) {
  pcl::PointCloud<pcl::PointXYZI> cloud;
  pcl::fromROSMsg(*lidar_msg, cloud);

  cloud_buffer.push_back(cloud);
  if (cloud_buffer.size() > 3) {
    cloud_buffer.pop_front();
  }
}




int main(int argc, char** argv){

  ros::init(argc,argv,"pcd2image");
  ros::NodeHandle nh;
  std::string package_path = ros::package::getPath("ilcc2");
  ROS_INFO("ilcc2 package at %s", package_path.c_str());


  std::string yaml_path, image_topic, lidar_topic;
  std::string extrinsic_file;
  ros::NodeHandle nh_private("~");

   
                                                                        
  nh_private.param<double>("distance_valid", distance_valid, 5);
  nh_private.param<std::string>("extrinsic_file", extrinsic_file, "/process_data/pose.bin");
  nh_private.param<std::string>("yaml_path", yaml_path, "30w.yaml");
  nh_private.param<std::string>("image_topic", image_topic, "/front");
  nh_private.param<std::string>("lidar_topic", lidar_topic, "/velodyne_points");


  std::string extrinsic_path = package_path + extrinsic_file;             //外参路径
  ROS_INFO("load T_lidar2cam from %s", extrinsic_path.c_str());             

  image_corners_est = ImageCornersEst::Ptr(new ImageCornersEst(package_path+"/config/"+yaml_path, true) ); //内参路径
  image_corners_est->txt2extrinsic( extrinsic_path );


  ros::Subscriber sub_livox =nh.subscribe<sensor_msgs::PointCloud2>(lidar_topic, 10, LiDARHandler);
  ros::Subscriber sub_image = nh.subscribe<sensor_msgs::Image>(image_topic, 20, ImageHandler);
 
  ros::Rate loop_rate(10);



  while (ros::ok()) {
    if (img_buffer.size() > 0 && cloud_buffer.size() > 0) {
      cv::Mat tmp_image = img_buffer.back();
      pcl::PointCloud<pcl::PointXYZI> tmp_cloud = cloud_buffer.back();
      img_buffer.clear();

      processData(tmp_image, tmp_cloud.makeShared());

    }

    ros::spinOnce();
    loop_rate.sleep();
  }


  return 0;





}
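The reason for switching from the 8-bit map to CV_16UC1 is resolution: with the range hard-coded to 0–260, one 8-bit grey level covers about 260/255 ≈ 1.0 units, while one 16-bit level covers about 260/65535 ≈ 0.004. A minimal sketch of the scaling used above (the 'low' and 'high' constants must match the ones hard-coded in processData):

#include <cstdint>

// Sketch of the 16-bit encoding used when the depth map above is written.
uint16_t encode16(double value, double low, double high) {
  return static_cast<uint16_t>((value - low) / (high - low) * 65535.0);
}
// e.g. encode16(10.0, 0.0, 260.0) == 2520, i.e. roughly 4 mm-scale steps
// instead of the ~1-unit steps of the earlier 8-bit map.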




Converting the depth PNG back to a point cloud

#include <fstream>
#include <iostream>
#include <math.h>
#include <ros/package.h>        /// ros::package::getPath
#include <Eigen/Core>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Image.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>


#include <pcl_conversions/pcl_conversions.h>
#include <pcl/common/transforms.h>    /// pcl::transformPointCloud
#include <pcl/io/pcd_io.h>            /// pcl::io::savePCDFile
#include "ilcc2/ImageCornersEst.h"
#include "ilcc2/config.h"

using namespace std;
using namespace message_filters;

ImageCornersEst::Ptr image_corners_est(new ImageCornersEst);
ros::Publisher pubLaserCloud;
double distance_valid;


int main(int argc, char** argv){

  ros::init(argc,argv,"rgb_lidar");

  ros::NodeHandle nh;

  std::string package_path = ros::package::getPath("ilcc2");
  ROS_INFO("ilcc2 package at %s", package_path.c_str());


  std::string yaml_path, image_topic, lidar_topic;
  std::string extrinsic_file;
  ros::NodeHandle nh_private("~");

 
  nh_private.param<std::string>("extrinsic_file", extrinsic_file, "/process_data_backup/pose.bin");
  nh_private.param<std::string>("yaml_path", yaml_path, "30w.yaml");
  nh_private.param<std::string>("image_topic", image_topic, "/front");
  nh_private.param<std::string>("lidar_topic", lidar_topic, "/velodyne_points");

  std::string extrinsic_path = package_path + extrinsic_file;
  ROS_INFO("load T_lidar2cam from %s", extrinsic_path.c_str());

  image_corners_est = ImageCornersEst::Ptr(new ImageCornersEst(package_path+"/config/"+yaml_path, true) );
  image_corners_est->txt2extrinsic( extrinsic_path );

  cv::Mat rgb, depth;
  rgb = cv::imread("/home/stone/fsdownload/1.png");
  const char filename[] = "/home/stone/Desktop/rosws/src/ilcc2/data/depthmap/1.png";
	// flag 2 (cv::IMREAD_ANYDEPTH): keep the 16-bit depth values when reading
  depth = cv::imread(filename, 2);
  cv::imshow("img", depth);
  cv::waitKey(0);

  pcl::PointCloud<pcl::PointXYZRGB>::Ptr rgbcloud(new pcl::PointCloud<pcl::PointXYZRGB>);
  Eigen::Matrix3d m_Rinverse;
  m_Rinverse=image_corners_est-> m_R;
  cout <<"inverse.............." << endl;
  m_Rinverse = m_Rinverse.inverse().eval(); 
  cout <<"inverse get.............." << endl;

  pubLaserCloud = nh.advertise<sensor_msgs::PointCloud2>("/rgb_lidar", 10);
  ros::Rate loop_rate(10);



  while (ros::ok()) {

	for (int m = 0; m < depth.rows; m++)
	{	
		for (int n = 0; n < depth.cols; n++)
		{ 
				ushort d = depth.ptr<ushort>(m)[n];
				if (d == 0)
					continue;
				pcl::PointXYZRGB rgbpoint;
				Eigen::Vector3d P_tmp;
				P_tmp[0] = d*(m-image_corners_est->m_cx)/image_corners_est->m_fx;
				P_tmp[1] = d*(n-(image_corners_est->m_cy-4))/image_corners_est->m_fy;
				P_tmp[2] = d;
			
				Eigen::Vector3d P_w;
				P_w = m_Rinverse*(P_tmp-image_corners_est->m_t);
				rgbpoint.x = P_w[0];
				rgbpoint.y = P_w[1];
				rgbpoint.z = P_w[2];

				rgbpoint.b =  rgb.ptr<uchar>(n)[m*3];
				rgbpoint.g =  rgb.ptr<uchar>(n)[m*3+1];
				rgbpoint.r =  rgb.ptr<uchar>(n)[m*3+2];

				rgbcloud->push_back(rgbpoint);
				
		}
	}
	  sensor_msgs::PointCloud2 cloud_msg;
      pcl::toROSMsg(*rgbcloud, cloud_msg);
      cloud_msg.header.stamp = ros::Time::now();
      cloud_msg.header.frame_id = "/livox_frame";

      pubLaserCloud.publish(cloud_msg);

	  rgbcloud->is_dense = false;
	  cout <<"here.............." << endl;
	try {
		// save the point cloud as a PCD file
		pcl::io::savePCDFile("/home/stone/pcd.pcd", *rgbcloud);
	}
	catch (pcl::IOException &e) {
		cout << e.what() << endl;
	}

	
	ros::spinOnce();
	loop_rate.sleep();

    }

 
  

}
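For reference, under the usual pinhole convention the horizontal pixel coordinate u (the column index) pairs with (cx, fx) and the vertical coordinate v (the row index) with (cy, fy); whether the m/n indices in the loop above follow that convention depends on how the intrinsics in the camera YAML are defined. A minimal back-projection sketch under the standard convention (illustrative, not the ilcc2 code itself):

#include <Eigen/Core>

// Illustrative only: standard pinhole back-projection of one depth pixel.
// u is the column, v is the row, d the metric depth along the optical axis;
// fx, fy, cx, cy are the rectified intrinsics.
Eigen::Vector3d backProject(int u, int v, double d,
                            double fx, double fy, double cx, double cy) {
  Eigen::Vector3d P_cam;
  P_cam.x() = d * (u - cx) / fx;
  P_cam.y() = d * (v - cy) / fy;
  P_cam.z() = d;
  return P_cam;   // still in the camera frame; apply R^-1 * (P_cam - t) for the lidar frame
}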




The previous version was wrong: the depth map should store depth (z), not intensity

#include <fstream>
#include <iostream>
#include <math.h>
#include <ros/package.h>        /// ros::package::getPath

#include <Eigen/Core>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Image.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>

#include <sstream>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/common/transforms.h>          /// pcl::transformPointCloud

#include "ilcc2/ImageCornersEst.h"

using namespace std;
using namespace message_filters;

ImageCornersEst::Ptr image_corners_est(new ImageCornersEst);

double distance_valid;

int num=0;

void processData(cv::Mat image, pcl::PointCloud<pcl::PointXYZI>::Ptr cloud)  
{
  num=num+1;
  std::string package_path = ros::package::getPath("ilcc2");
  cv::Mat rectifyImage; 
   cv::resize(image, image, cv::Size( 640, 360));     //矫正的图像
  cv::undistort(image, rectifyImage, image_corners_est->camK, image_corners_est->distort_param, image_corners_est->camK);
  cv::resize(rectifyImage, rectifyImage, cv::Size( 640, 360)); 

  cv::Mat saverectifyImage;
  saverectifyImage= rectifyImage(cv::Rect(0,4,640,352)); 
  cv::imwrite(package_path+"/data/image/"+std::to_string(num)+".png", saverectifyImage);
  
  cv::Mat depthimage(360,640,CV_16UC1); 
 
  for (int i = 0; i < depthimage.rows; i++)
    {
      unsigned short *p = depthimage.ptr<unsigned short>(i);
      for (int j = 0; j < depthimage.cols; j++)
      {
        
          p[j] = 0;
    
      }
    }
  pcl::PointXYZI ptmp;

  /// 生成每个点的颜色
  double z_low=255, z_high=0;    
  std::vector<double> datas;            
  for(unsigned int index=0; index<cloud->size(); index++)  //找深度的最大值和最小值
  {
      ptmp = cloud->points[index];
      if(z_low > ptmp.z)
          z_low = ptmp.z;
      if(z_high < ptmp.z)
          z_high = ptmp.z;
      datas.push_back(ptmp.z);
  }
  ROS_INFO("max distance: %lf   ,min distance: %lf", z_high, z_low);
  z_low = -2;
  z_high = 40;

  int counter = 0;
  for(unsigned int index=0; index<cloud->size(); index++)
  {

    ptmp = cloud->points[index];
   // std::cout << ptmp.z << endl;
    Eigen::Vector3d Pw(ptmp.x, ptmp.y, ptmp.z);
    Eigen::Vector2d Pcam;
       //转换平面
    if ( image_corners_est->spaceToPlane(Pw, Pcam, distance_valid) ) {

      int x = Pcam[0];
      int y = Pcam[1];
      unsigned char r, g, b;

      double h = (ptmp.z-z_low)/(z_high -z_low)*65535;  //对深度进行归一化

      unsigned short *p = depthimage.ptr<unsigned short>(y);
      p[x] = h;

      image_corners_est->HSVtoRGB(h/256, 100, 100, &r, &g, &b);
      cv::circle(rectifyImage, cv::Point2d(x,y), 0.6, cv::Scalar(r,g,b), 2);
      counter++;
    }
  }
//  std::cout << counter << " points ok\n";






  //cv::resize(rectifyImage, rectifyImage, cv::Size(rectifyImage.cols/1, rectifyImage.rows/1));
  cv::imshow("img_liar_point", rectifyImage);  //矫正后的图像
 
  cv::Mat savelidar_image;
  savelidar_image= rectifyImage(cv::Rect(0,4,640,352)); 
  cv::imwrite(package_path+"/data/lidar_image/"+std::to_string(num)+".png", savelidar_image);
  vector<int>  compression_params;
  compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
  compression_params.push_back(0);

  cv::Mat savedepthimage;
  savedepthimage= depthimage(cv::Rect(0,4,640,352)); 
  cv::imwrite(package_path+"/data/depthmap/"+std::to_string(num)+".png", savedepthimage,compression_params);
  cv::waitKey(5);
}




std::deque<cv::Mat> img_buffer;
std::deque<pcl::PointCloud<pcl::PointXYZI>> cloud_buffer;


void ImageHandler(const sensor_msgs::Image::ConstPtr& img_msg) {
  cv::Mat img = cv_bridge::toCvCopy(img_msg)->image;
  if (!img.empty()) {
    img_buffer.push_back(img);
    if (img_buffer.size() > 3) {
      img_buffer.pop_front();
    }
  }
}

void LiDARHandler(const sensor_msgs::PointCloud2::ConstPtr& lidar_msg) {
  pcl::PointCloud<pcl::PointXYZI> cloud;
  pcl::fromROSMsg(*lidar_msg, cloud);

  cloud_buffer.push_back(cloud);
  if (cloud_buffer.size() > 3) {
    cloud_buffer.pop_front();
  }
}




int main(int argc, char** argv){

  ros::init(argc,argv,"pcd2image");
  ros::NodeHandle nh;
  std::string package_path = ros::package::getPath("ilcc2");
  ROS_INFO("ilcc2 package at %s", package_path.c_str());


  std::string yaml_path, image_topic, lidar_topic;
  std::string extrinsic_file;
  ros::NodeHandle nh_private("~");

   
                                                                        
  nh_private.param<double>("distance_valid", distance_valid, 5);
  nh_private.param<std::string>("extrinsic_file", extrinsic_file, "/process_data/pose.bin");
  nh_private.param<std::string>("yaml_path", yaml_path, "30w.yaml");
  nh_private.param<std::string>("image_topic", image_topic, "/front");
  nh_private.param<std::string>("lidar_topic", lidar_topic, "/velodyne_points");


  std::string extrinsic_path = package_path + extrinsic_file;             //外参路径
  ROS_INFO("load T_lidar2cam from %s", extrinsic_path.c_str());             

  image_corners_est = ImageCornersEst::Ptr(new ImageCornersEst(package_path+"/config/"+yaml_path, true) ); //内参路径
  image_corners_est->txt2extrinsic( extrinsic_path );


  ros::Subscriber sub_livox =nh.subscribe<sensor_msgs::PointCloud2>(lidar_topic, 10, LiDARHandler);
  ros::Subscriber sub_image = nh.subscribe<sensor_msgs::Image>(image_topic, 20, ImageHandler);
 
  ros::Rate loop_rate(10);



  while (ros::ok()) {
    if (img_buffer.size() > 0 && cloud_buffer.size() > 0) {
      cv::Mat tmp_image = img_buffer.back();
      pcl::PointCloud<pcl::PointXYZI> tmp_cloud = cloud_buffer.back();
      img_buffer.clear();

      processData(tmp_image, tmp_cloud.makeShared());

    }

    ros::spinOnce();
    loop_rate.sleep();
  }


  return 0;





}
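Because the map now stores z scaled from [z_low, z_high] = [-2, 40] into the full 16-bit range, a consumer has to read the PNG without conversion and apply the inverse mapping; a short sketch (assuming the same hard-coded constants):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

// Sketch: recover metric z from one of the 16-bit depth maps written above.
// z_low / z_high must match the constants hard-coded when the map was saved.
double pixelToZ(unsigned short px, double z_low = -2.0, double z_high = 40.0) {
  return z_low + static_cast<double>(px) / 65535.0 * (z_high - z_low);
}

// Usage (the path is only an example):
//   cv::Mat depth = cv::imread("depthmap/1.png", cv::IMREAD_UNCHANGED);   // CV_16UC1
//   double z = pixelToZ(depth.at<unsigned short>(row, col));
//   // a pixel value of 0 means "no lidar return", not z == z_low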




