ROS中用opencv库实现物体中心点提取

老师搞了个很简陋的双目摄像头,只能输出两个摄像头的图像,所以为了提取定位物体中心坐标,还得算个深度距离。先对两个摄像头图像处理一下,基于阈值分割,然后提取个轮廓,计算个质心,水平实在有限,提取的精度不高。
这是左右摄像头发布二维坐标的程序

#include <cstdio>
#include <string>
#include <vector>

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <geometry_msgs/Point.h> //点 消息类型
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

// Debug-window titles for the right and left camera views.
static const std::string OPENCV_WINDOW_second = "Raw Image window";
static const std::string OPENCV_WINDOW_left_second = "Raw Image window_left";
//static const std::string OPENCV_WINDOW_1 = "Edge Detection";
using namespace cv;
using namespace std;
// Stereo object-centre detector: for each camera, segments the frame with
// Otsu thresholding, finds the largest contour, and publishes its centroid
// as a 2-D point for the downstream depth-triangulation node.
class Edge_Detector_second
{
  ros::NodeHandle nh_;
  image_transport::ImageTransport it_;
  image_transport::Subscriber image_sub_second;       // right raw camera feed
  image_transport::Subscriber image_sub_left_second;  // left raw camera feed
  image_transport::Publisher image_pub_second;        // annotated right image
  ros::Publisher leftPointPub_second;                 // left 2-D object centre
  image_transport::Publisher image_pub_left_second;   // annotated left image
  ros::Publisher rightPointPub_second;                // right 2-D object centre

public:
  // Subscribes to the raw stereo feeds and advertises the annotated images
  // plus the per-camera 2-D object-centre topics used by the depth node.
  Edge_Detector_second()
    : it_(nh_)
  {
    image_sub_second = it_.subscribe("/right/image_raw", 1,
      &Edge_Detector_second::right_imageCb_second, this);
    image_sub_left_second = it_.subscribe("/left/image_raw", 1,
      &Edge_Detector_second::left_imageCb_second, this);
    image_pub_second = it_.advertise("/edge_detector/right_image_second", 1);
    image_pub_left_second = it_.advertise("/edge_detector/left_image_second", 1);
    leftPointPub_second = nh_.advertise<geometry_msgs::Point>("/edge_detector/left_2Dposition_second", 1);   // topic: left_2Dposition
    rightPointPub_second = nh_.advertise<geometry_msgs::Point>("/edge_detector/right_2Dposition_second", 1); // topic: right_2Dposition
  }

  ~Edge_Detector_second()
  {
    cv::destroyWindow(OPENCV_WINDOW_second);
    cv::destroyWindow(OPENCV_WINDOW_left_second);
  }

  // Right-camera callback: runs the shared detection pipeline.
  void right_imageCb_second(const sensor_msgs::ImageConstPtr& msg)
  {
    processImage_second(msg, image_pub_second, rightPointPub_second, OPENCV_WINDOW_second);
  }

  // Left-camera callback: runs the shared detection pipeline.
  void left_imageCb_second(const sensor_msgs::ImageConstPtr& msg)
  {
    processImage_second(msg, image_pub_left_second, leftPointPub_second, OPENCV_WINDOW_left_second);
  }

  // Otsu-threshold segmentation + contour extraction. Returns a RotatedRect
  // whose centre is the centroid of the largest-area contour (size and angle
  // are unused and left zero); returns a zero-centred rectangle when no
  // contour is found. Side effect: img is overwritten with a debug overlay
  // showing every contour, centroid, bounding box and coordinate label.
  RotatedRect detect_edges_second(cv::Mat img)
  {
    // (centre (0,0), zero size, zero angle) doubles as the "no object" sentinel
    RotatedRect faceRectangle = RotatedRect(Point2f(0, 0), Size2f(0, 0), 0);

    cv::Mat gray, blurred, otsuImage;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    cv::blur(gray, blurred, cv::Size(5, 5));
    // With THRESH_OTSU the threshold argument (200) is ignored: Otsu's method
    // picks the threshold automatically from the image histogram.
    cv::threshold(blurred, otsuImage, 200, 255, cv::THRESH_OTSU);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    cv::findContours(otsuImage, contours, hierarchy, cv::RETR_TREE,
                     cv::CHAIN_APPROX_SIMPLE, Point(0, 0));

    // Compute each contour's centroid from image moments, and track the
    // largest-area contour in the same pass.
    vector<Point2f> centroids(contours.size());
    double largestArea = 0.0;
    int largestIndex = 0;
    for (size_t i = 0; i < contours.size(); ++i)
    {
      const Moments mu = moments(contours[i], false);
      // BUGFIX: guard m00 == 0 (degenerate contour) to avoid a NaN centroid.
      centroids[i] = (mu.m00 != 0.0)
          ? Point2f(static_cast<float>(mu.m10 / mu.m00),
                    static_cast<float>(mu.m01 / mu.m00))
          : Point2f(0.f, 0.f);

      const double area = contourArea(contours[i], false);
      if (area > largestArea)
      {
        largestArea = area;
        largestIndex = static_cast<int>(i);
      }
    }

    // Debug overlay: contours (blue), centroids (red), bounding boxes (green)
    // and the centroid coordinates as text.
    cv::Mat drawing = cv::Mat::zeros(blurred.size(), CV_8UC3);
    for (size_t i = 0; i < contours.size(); ++i)
    {
      drawContours(drawing, contours, static_cast<int>(i), Scalar(255, 0, 0), 2, 8, hierarchy, 0, Point());
      circle(drawing, centroids[i], 5, Scalar(0, 0, 255), -1, 8, 0);
      rectangle(drawing, boundingRect(contours.at(i)), Scalar(0, 255, 0));
      char label[100];
      snprintf(label, sizeof(label), "(%0.0f,%0.0f)", centroids[i].x, centroids[i].y); // bounded, unlike sprintf
      putText(drawing, label, Point(centroids[i].x, centroids[i].y),
              FONT_HERSHEY_SIMPLEX, 0.4, Scalar(255, 0, 255), 1);
    }
    drawing.copyTo(img); // replace the caller's frame with the overlay

    if (!contours.empty())
    {
      faceRectangle.center = centroids[largestIndex];
    }
    // BUGFIX: the original fell off the end of this non-void function when no
    // contour was found (undefined behaviour); always return the rectangle.
    return faceRectangle;
  }

private:
  // Shared per-camera pipeline: convert the ROS image to BGR8, detect the
  // largest object, publish the annotated frame and its 2-D centre, and show
  // a debug window. Frames of 300x200 or smaller are skipped.
  void processImage_second(const sensor_msgs::ImageConstPtr& msg,
                           image_transport::Publisher& imagePub,
                           ros::Publisher& pointPub,
                           const std::string& windowName)
  {
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    if (cv_ptr->image.rows > 200 && cv_ptr->image.cols > 300)
    {
      // detect_edges_second() annotates cv_ptr->image in place, so the
      // published image carries the debug overlay.
      const RotatedRect objectRect = detect_edges_second(cv_ptr->image);

      imagePub.publish(cv_ptr->toImageMsg());

      geometry_msgs::Point objectCenter; // z stays 0 (2-D point)
      objectCenter.x = objectRect.center.x;
      objectCenter.y = objectRect.center.y;
      pointPub.publish(objectCenter); // publish the detected target coordinates

      cv::imshow(windowName, cv_ptr->image);
      cv::waitKey(3);
    }
  }

};


// Entry point: construction of the detector wires up all pubs/subs, then
// control is handed to the ROS callback loop.
int main(int argc, char** argv)
{
  ros::init(argc, argv, "Edge_Detector_second");
  Edge_Detector_second detector;
  ros::spin();
  return 0;
}

然后是根据发布的两个坐标计算深度坐标


#include <ros/ros.h>
#include <geometry_msgs/Point.h> //点 消息类型
#include <geometry_msgs/PointStamped.h> 
#include <iostream>
#include <stdio.h>


#define SUBSCRIBE_FREQUENCY 5 // triangulation loop rate in Hz
#define CAMERA_MODEL_FX 538.4 // camera focal length fx in pixels — presumably from calibration, TODO confirm
#define DISTANCE_BETWEEN_CAMERAS 0.062 // stereo baseline T — presumably metres, TODO confirm units
#define IMAGE_WIDTH 640 // image width in pixels
#define MODEL_PARAMETER (CAMERA_MODEL_FX * DISTANCE_BETWEEN_CAMERAS) // fx * T, numerator of the depth formula
using namespace std;

ros::Publisher target3DPositionPub_second;                    // publishes the triangulated 3-D position
geometry_msgs::Point left2DPoint_second, right2DPoint_second; // latest 2-D centres received from each camera
geometry_msgs::PointStamped target3DPosition_second;          // stamped result message, reused every cycle

void leftPointCallback(const geometry_msgs::Point& leftPoint_second)
{
  left2DPoint_second = leftPoint_second;
}

void rightPointCallback(const geometry_msgs::Point& rightPoint_second)
{
  right2DPoint_second = rightPoint_second;
}

//深度 Z = fx*T / (leftPoint.x - rightPoint.x),计算结果存入发布点的 y 坐标

// Triangulates the target's 3-D position from the matched left/right 2-D
// centres and publishes it on target_3Dposition_second.
// Pinhole stereo model (baseline T, focal length fx):
//   depth = fx * T / (x_left - x_right)          -> stored in point.y
//   point.x = lateral offset of the target from the midpoint between cameras
// Publishes (0,0,0) when either detector reported no target (x == 0 is the
// detector's "no contour" sentinel) or the horizontal disparity is zero,
// which would divide by zero.
void CalTargetPos(geometry_msgs::Point& leftPoint, geometry_msgs::Point& rightPoint)
{
  target3DPosition_second.header.frame_id = "/map";
  target3DPosition_second.header.stamp = ros::Time::now();

  const double xDisparity = leftPoint.x - rightPoint.x;
  if (leftPoint.x == 0 || rightPoint.x == 0 || xDisparity == 0)
  {
     target3DPosition_second.point.x = 0;
     target3DPosition_second.point.y = 0;
     target3DPosition_second.point.z = 0;
  }
  else
  {
     target3DPosition_second.point.x = DISTANCE_BETWEEN_CAMERAS * (leftPoint.x + rightPoint.x - IMAGE_WIDTH) / 2 / xDisparity;
     target3DPosition_second.point.y = MODEL_PARAMETER / xDisparity;
     // NOTE(review): z is derived from the VERTICAL disparity, which is ~0 on
     // a rectified stereo rig — looks suspicious, confirm against the camera
     // geometry. BUGFIX: guard the division; the original published inf/NaN
     // whenever leftPoint.y == rightPoint.y.
     const double yDisparity = leftPoint.y - rightPoint.y;
     target3DPosition_second.point.z =
         (yDisparity != 0) ? DISTANCE_BETWEEN_CAMERAS * CAMERA_MODEL_FX / yDisparity : 0;
  }
  target3DPositionPub_second.publish(target3DPosition_second);
}

int main(int argc, char **argv)
{
  ros::init(argc, argv, "cal_target_3Dposition_second"); 
  ros::NodeHandle nh; 
  ros::Rate r(SUBSCRIBE_FREQUENCY); 
  target3DPositionPub_second = nh.advertise<geometry_msgs::PointStamped>("target_3Dposition_second", 1); 
  ros::Subscriber left2DPositionSub = nh.subscribe("/edge_detector/left_2Dposition_second", 1, leftPointCallback); 
  ros::Subscriber right2DPositionSub = nh.subscribe("/edge_detector/right_2Dposition_second", 1, rightPointCallback); 
  while(ros::ok())
  {
    CalTargetPos(left2DPoint_second, right2DPoint_second);
    ros::spinOnce(); 
    r.sleep();
  }

  return 0;
}

深度计算只针对识别到的最大物体(即面积最大轮廓的质心),其余轮廓仅用于调试显示。

  • 1
    点赞
  • 11
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值