利用kinect V1相机获取点云拟合平面获取相机角度信息

5 篇文章 0 订阅
  1. 利用深度相机获取点云二维图像
  2. 将二维图像通过相机内参转换成点云
  3. 取局部点云,对点云进行平面拟合(采用的是SVD分解),利用最小特征值对应的特征向量为平面法向量

利用深度相机获取点云二维图像

  1. 深度相机原理介绍
  2. 安装深度相机(Kinect V1)驱动
    1. 借鉴创客智造的博客:链接
  3. kinect V1的图像校准:利用上面的教程安装驱动,保证可以获取RGB和IR图像,然后按照链接进行标定
    1. RGB链接
      1. 过程文件如图

         

    2. IR链接
      1. 点击save保存结果,点击commit直接储存结果信息
      2. 过程文件如图

         

    3. RGB和IR的标定
      1. 一些比较好的博文(可以参考):zkl999999
      2. OpenCv 的Stereo Calibration
      3. 本文采用(因为后续的特征匹配也采用这种方式,这种方式只要拍一次照片就可以)
        1. 直接利用
  4. 将二维图像通过相机内参转换成点云
  5. 深度图和可见光图的对应
    1. 获取IR中点云的世界坐标(相对于红外相机)
    2. 将IR点通过IR-RGB相机外参转换转移到可见光相机坐标系下
    3. 通过可见光相机的内参矩阵进行投影(将IR中的点和可见光中的图像数据进行对应,并且进行存储-接下来称呼这个信息为RGB_with_Depth)
    4. 具体代码如下
  6. 通过Kinect V1拍摄两次,获取相机相对位姿变换
    1. 通过Kinect V1对同一个场景,在不同位置拍摄两张照片
    2. 通过步骤5计算两个位置下的RGB_with_Depth
    3. 对两个场景下的RGB图像进行特征点匹配,获取到特征点对应
    4. 对特征点对分别在各自的RGB_with_Depth里面寻找对应点,获取对应的点的三维信息
    5. 对上面的三维空间点对进行外参矩阵的拟合求解,获取两个位置的外参矩阵

 

代码:

#include<ros/ros.h> //ros标准库头文件
#include<iostream> //C++标准输入输出库
#include<math.h>
#include<cv_bridge/cv_bridge.h>
#include<sensor_msgs/image_encodings.h>
#include<image_transport/image_transport.h>
#include <pthread.h>
#include <pcl/point_cloud.h>
#include <pcl_conversions/pcl_conversions.h>
#include <sensor_msgs/PointCloud2.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/features/integral_image_normal.h>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <Eigen/SVD>
#include <Eigen/Dense>

//using Eigen::MatrixXf;
using namespace Eigen;
using namespace Eigen::internal;
using namespace Eigen::Architecture;
using std::cout;
using std::endl;
using std::stringstream;
using std::string;
bool saveCloud(false);
unsigned int fileNum = 1;
#define PI 3.14159265354
float fx = 525.0;  // focal length x
float fy = 525.0;  // focal length y
float cx = 319.5;  // optical center x
float cy = 239.5;  // optical center y

float factor = 1000.0;// for the 32-bit float images in the ROS bag files
float angle=0.0;
float angle_wall=0.0;


class RGB_GRAY
{
public:
    ros::NodeHandle nh_;
    image_transport::ImageTransport it_;
    float angle=0.0;
    RGB_GRAY()
      :it_(nh_)
    {

    }

    ~RGB_GRAY()
    {
    }

};


void rgb_callback(const sensor_msgs::ImageConstPtr& msg)
{
    //ROS_INFO("rgb callback");
    cv::Mat image;
    try
    {
        image =  cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8) -> image;
        char key;
        key=cvWaitKey(33);
        if(key==32)           //the Ascii of "Space key" is 32
        saveCloud = true;
        if(saveCloud)
        {
          stringstream stream;
          stringstream stream1;
          stream <<"Goal RgbImage" << fileNum<<".jpg";
          stream1 <<"/home/leonsun/catkin_navi/imgs/" << fileNum <<".jpg";
          string filename = stream.str();
          string filename1 = stream1.str();
          imwrite(filename1,cv_bridge::toCvShare(msg)->image);
          saveCloud = false;
          fileNum++;
          cout << filename << " had Saved."<< endl;
        }
    }
    catch(cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    int width,height;
    width=image.cols/4;
    height=image.rows/4;


    cv::circle(image, cvPoint(image.cols / 2, image.rows / 2), 2, cv::Scalar(255, 0, 0), -1);
    cv::Rect rect(image.cols/2-width/2, image.rows/2-height/2, width, height);//左上坐标(x,y)和矩形的长(x)宽(y)
    cv::rectangle(image, rect, cv::Scalar(255, 0, 0),1, cv::LINE_8,0);
    float x_col,y_row;
    x_col=100*sin(angle_wall)+image.cols / 2;
    y_row=-100*cos(angle_wall)+image.rows / 2;

    cv::line(image,cvPoint(image.cols / 2, image.rows / 2),cvPoint(x_col,y_row),cv::Scalar(255, 0, 0));//画直线
    cv::imshow("image", image);
    cv::waitKey(5);
}

void depth_callback(const sensor_msgs::ImageConstPtr& msg)
{
    //ROS_INFO("depth callback");
  cv::Mat image;
    try
    {
        image =  cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_32FC1) -> image; //image.type==CV32FC1   5
    }
    catch(cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }
    int width,height;
    width=image.cols/4;
    height=image.rows/4;
    float a,b,a_depth,b_depth,delta,distance;
    a=image.cols/2-width/2;
    b=image.cols/2+width/2;
    int u_num=height;
    int v_num=width;
    pcl::PointCloud<pcl::PointXYZ> cloud;
    sensor_msgs::PointCloud2 output;
    cloud.width  = width;
    cloud.height = height;
    cloud.points.resize(width*height);
    float Z,X,Y;
    std::vector<cv::Point3f> points;

    //MatrixXd positions;
    int num=0;
    int points_num=0;
    float X_sum=0;
    float Y_sum=0;
    float Z_sum=0;
    for(int u=image.rows/2-height/2;u<image.rows/2+height/2;u++)
    {
      for(int v=image.cols/2-width/2;v<image.cols/2+width/2;v++)
      {
        cloud.points[num].z=image.at<float>(u,v)/factor;
        Z=image.at<float>(u,v)/factor;
        cloud.points[num].x=(u - cx) * Z / fx;
        cloud.points[num].y=(v - cy) * Z / fy;
        X_sum=X_sum+cloud.points[num].x;
        Y_sum=Y_sum+cloud.points[num].y;
        Z_sum=Z_sum+cloud.points[num].z;

        if (Z!=0)
          points_num=points_num+1;
        num=num+1;
      }
    }

    if (points_num!=0)
    {
      MatrixXf positions(points_num,3);
      float X_mean=X_sum/points_num;
      float Y_mean=Y_sum/points_num;
      float Z_mean=Z_sum/points_num;

      int b=0;
      for (int number=0;number<cloud.size();number++)
      {
        if (cloud.points[number].z!=0)
        {
          positions(b,0)=cloud.points[number].x-X_mean;

          positions(b,1)=cloud.points[number].y-Y_mean;

          positions(b,2)=cloud.points[number].z-Z_mean;

          b++;
        }
      }
      MatrixXf Covariance_matrix(3,3);
      Covariance_matrix=positions.transpose()*positions;
      JacobiSVD<Eigen::MatrixXf> svd(Covariance_matrix, ComputeThinU | ComputeThinV );
      Matrix3f V = svd.matrixV(), U = svd.matrixU();
      Matrix3f  S = U.inverse() * Covariance_matrix * V.transpose().inverse(); // S = U^-1 * A * VT * -1
      //std::cout<<"A :\n"<<positions<<std::endl;
      //cout<<Covariance_matrix<<endl;
      //std::cout<<"U :\n"<<U<<std::endl;
      //std::cout<<"S :\n"<<S<<std::endl;
      //std::cout<<"V :\n"<<V<<std::endl;
      Vector3f Fn;

      if (V(2,2)<0)
      {
        Fn(2)=-V(2,2);
        Fn(0)=-V(2,0);
        Fn(1)=-V(2,1);
      }
      else
      {
        Fn(2)=V(2,2);
        Fn(0)=V(2,0);
        Fn(1)=V(2,1);
      }


      angle_wall=atan(Fn(0)/Fn(2));
      ROS_INFO("Angle: %f", angle_wall*180/PI);
    }
    else
    {
      ROS_INFO("Please Dont stand infront of camera.");
    }
    cv::circle(image, cvPoint(image.cols / 2, image.rows / 2), 2, cv::Scalar(0, 0, 0), -1);
    cv::Rect rect(image.cols/2-width/2, image.rows/2-height/2, width, height);//左上坐标(x,y)和矩形的长(x)宽(y)
    cv::rectangle(image, rect, cv::Scalar(0, 0, 0),1, cv::LINE_8,0);
    cv::imshow("depth", image);
    cv::waitKey(5);
}

int main(int argc, char** argv)
{  
    ros::init(argc, argv, "RGB");
    ros::NodeHandle nh_;
    //image_transport::ImageTransport it_(nh_);
    ros::Subscriber image_sub_,depth_sub_;

    cv::namedWindow("image");
    cv::namedWindow("depth");

    image_sub_ = nh_.subscribe("/camera/rgb/image_raw", 1, rgb_callback);
    depth_sub_ = nh_.subscribe("/camera/depth_registered/image_raw", 1, depth_callback);
    int err;
    pthread_t id;
    ros::spin();
}

 

使用Kinect2.0获取点云的过程可以通过以下步骤实现。首先,需要搭建相应的开发环境,包括安装Windows系统和Visual Studio 2019等软件。可以参考\[2\]中提供的源码和项目模板进行环境搭建。接下来,需要使用Kinect2.0相机进行点云图像的捕获。Kinect2.0是一款RGB-D相机,支持普通相机的拍摄和脉冲测量深度信息。可以参考\[1\]中提供的开发源码,了解Kinect2.0的原理和使用方法。在代码中,可以通过调用相应的函数获取相机的深度信息和RGB图像。最后,可以根据需要对获取点云数据进行处理和拼接,实现三维点云的应用。 #### 引用[.reference_title] - *1* *2* [深度相机Kinect2.0三维点云拼接实验(四)](https://blog.csdn.net/qq_42144047/article/details/123449528)[target="_blank" data-report-click={"spm":"1018.2226.3001.9630","extra":{"utm_source":"vip_chatgpt_common_search_pc_result","utm_medium":"distribute.pc_search_result.none-task-cask-2~all~insert_cask~default-1-null.142^v91^control,239^v3^insert_chatgpt"}} ] [.reference_item] - *3* [Kinect2.0点云数据获取](https://blog.csdn.net/weixin_42651748/article/details/112053649)[target="_blank" data-report-click={"spm":"1018.2226.3001.9630","extra":{"utm_source":"vip_chatgpt_common_search_pc_result","utm_medium":"distribute.pc_search_result.none-task-cask-2~all~insert_cask~default-1-null.142^v91^control,239^v3^insert_chatgpt"}} ] [.reference_item] [ .reference_list ]
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值