
This article describes a node developed with ROS (Robot Operating System) that subscribes to RGB images and depth data, processes the image, locates an object through contour analysis and PCA, and estimates its pose. The code demonstrates how OpenCV and PCL can be combined for real-time object detection and pose estimation.

#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/PointCloud2.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <Eigen/Dense>
#include <Eigen/Eigenvalues>
#include <message_filters/subscriber.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/synchronizer.h>
#include <image_transport/image_transport.h>
#include <vector>
#include <pcl/common/common.h>
#include <pcl/common/centroid.h>   // pcl::compute3DCentroid
#include <pcl/common/pca.h>
#include <sensor_msgs/image_encodings.h>
#include <boost/bind.hpp>          // boost::bind and the _1/_2 placeholders
#include <algorithm>
#include <cmath>
#include <limits>

Eigen::Matrix4f estimatePoseMatrix(const std::vector<pcl::PointXYZ>& points) {
    // Pack the sampled points into a PCL cloud so the centroid and PCA helpers can be used.
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
    for (const auto& pt : points) {
        cloud->points.push_back(pt);
    }

    // pcl::PCA needs at least three points; fall back to an identity pose otherwise.
    if (cloud->points.size() < 3) {
        return Eigen::Matrix4f::Identity();
    }

    // Translation: centroid of the sampled points.
    Eigen::Vector4f centroid;
    pcl::compute3DCentroid(*cloud, centroid);

    // Orientation: principal axes of the point set.
    pcl::PCA<pcl::PointXYZ> pca;
    pca.setInputCloud(cloud);
    Eigen::Matrix3f eigenVectors = pca.getEigenVectors();

    // Assemble a 4x4 homogeneous pose: rotation from the PCA axes, translation from the centroid.
    Eigen::Matrix4f poseMatrix = Eigen::Matrix4f::Identity();
    poseMatrix.block<3, 3>(0, 0) = eigenVectors;
    poseMatrix.block<3, 1>(0, 3) = centroid.head<3>();

    return poseMatrix;
}
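
// Added sketch (not in the original post): PCA eigenvectors are only defined up
// to sign, so the basis returned by pca.getEigenVectors() can have determinant
// -1 and is then not a proper rotation. A hypothetical helper like the one
// below could be applied to eigenVectors before it is written into the pose
// matrix, flipping the third axis whenever the determinant is negative.
Eigen::Matrix3f makeRightHanded(Eigen::Matrix3f basis) {
    if (basis.determinant() < 0.0f) {
        basis.col(2) = -basis.col(2);  // restore det = +1 so the 3x3 block is a valid rotation
    }
    return basis;
}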

void imageCallback(const sensor_msgs::ImageConstPtr& color_msg, 
                   const sensor_msgs::PointCloud2ConstPtr& cloud_msg, 
                   const image_transport::Publisher& processed_image_pub) {
    try {
        // Convert the ROS image to an OpenCV BGR image.
        cv_bridge::CvImagePtr cv_color = cv_bridge::toCvCopy(color_msg, sensor_msgs::image_encodings::BGR8);

        // Convert the ROS point cloud message to a PCL cloud.
        pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
        pcl::fromROSMsg(*cloud_msg, *cloud);

        // Binarize bright regions (fixed threshold of 200) and extract their external contours.
        cv::Mat gray_image, threshold_output;
        cv::cvtColor(cv_color->image, gray_image, cv::COLOR_BGR2GRAY);
        cv::threshold(gray_image, threshold_output, 200, 255, cv::THRESH_BINARY);

        std::vector<std::vector<cv::Point>> contours;
        cv::findContours(threshold_output, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

        // Keep only the largest contour by area.
        double maxArea = 0;
        std::vector<cv::Point> largest_contour;
        for (const auto& contour : contours) {
            double area = cv::contourArea(contour);
            if (area > maxArea) {
                maxArea = area;
                largest_contour = contour;
            }
        }

        std::vector<pcl::PointXYZ> nearest_points;
        if (!largest_contour.empty()) {
            // Draw the selected contour on the annotated output image.
            cv::drawContours(cv_color->image, std::vector<std::vector<cv::Point>>{largest_contour}, -1, cv::Scalar(0, 255, 0), 2);

            // Contour centroid in pixel coordinates.
            cv::Moments m = cv::moments(largest_contour);
            if (m.m00 > 0) {
                cv::Point2f center(m.m10 / m.m00, m.m01 / m.m00);

                // Collect the distance of every cloud point to the contour center.
                // Note: this compares the cloud's x/y coordinates directly with pixel
                // coordinates, which assumes the cloud is expressed in the image pixel
                // grid. For a metric, organized cloud registered to the RGB image,
                // indexing cloud->at(u, v) around the center would be the usual approach.
                std::vector<std::pair<float, pcl::PointXYZ>> distance_point_pairs;
                for (const auto& point : cloud->points) {
                    // Skip invalid (NaN) depth returns.
                    if (!std::isfinite(point.x) || !std::isfinite(point.y) || !std::isfinite(point.z)) {
                        continue;
                    }
                    float distance = std::sqrt(std::pow(point.x - center.x, 2) + std::pow(point.y - center.y, 2));
                    distance_point_pairs.push_back(std::make_pair(distance, point));
                }

                // Partially sort so the k nearest points come first; clamp k so
                // nth_element stays within bounds when the cloud is small.
                const size_t k = std::min<size_t>(10, distance_point_pairs.size());
                std::nth_element(distance_point_pairs.begin(), distance_point_pairs.begin() + k, distance_point_pairs.end(),
                                 [](const std::pair<float, pcl::PointXYZ>& a, const std::pair<float, pcl::PointXYZ>& b) {
                                     return a.first < b.first;
                                 });

                for (size_t i = 0; i < k; ++i) {
                    nearest_points.push_back(distance_point_pairs[i].second);
                }
            }

            // Estimate the object pose from the sampled points.
            if (!nearest_points.empty()) {
                Eigen::Matrix4f poseMatrix = estimatePoseMatrix(nearest_points);
                ROS_INFO_STREAM("Estimated pose matrix:\n" << poseMatrix);
            }
        }

        // Publish the annotated image.
        sensor_msgs::ImagePtr output_msg = cv_bridge::CvImage(color_msg->header, "bgr8", cv_color->image).toImageMsg();
        processed_image_pub.publish(output_msg);

    } catch (const cv_bridge::Exception& e) {
        ROS_ERROR("cv_bridge exception: %s", e.what());
    }
}

int main(int argc, char** argv) {
    ros::init(argc, argv, "object_detection");
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);

    // Subscribe to the color image and the depth point cloud, and advertise the
    // annotated output image.
    message_filters::Subscriber<sensor_msgs::Image> color_sub(nh, "/global_cam/rgb/rgb_raw", 1);
    message_filters::Subscriber<sensor_msgs::PointCloud2> cloud_sub(nh, "/global_cam/depth/berxel_cloudpoint", 1);
    image_transport::Publisher processed_image_pub = it.advertise("/camera/processed_image", 1);

    // Approximate-time synchronization pairs each color image with the nearest
    // point cloud, since the two streams are not exactly time-aligned.
    typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::PointCloud2> SyncPolicy;
    message_filters::Synchronizer<SyncPolicy> sync(SyncPolicy(10), color_sub, cloud_sub);
    sync.registerCallback(boost::bind(&imageCallback, _1, _2, processed_image_pub));

    ros::spin();
    return 0;
}
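
The node above only prints the estimated pose to the console. If the result were to be consumed by other ROS nodes, the 4x4 matrix would normally be split into a translation and a quaternion first. The helper below is a minimal sketch of that conversion using plain Eigen and geometry_msgs; the function name matrixToPose and the idea of publishing a geometry_msgs::PoseStamped are assumptions added here, not part of the original code.

#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <geometry_msgs/PoseStamped.h>
#include <std_msgs/Header.h>

// Hypothetical helper: convert the 4x4 matrix returned by estimatePoseMatrix()
// into the translation + quaternion form used by geometry_msgs::PoseStamped.
geometry_msgs::PoseStamped matrixToPose(const Eigen::Matrix4f& poseMatrix,
                                        const std_msgs::Header& header) {
    geometry_msgs::PoseStamped msg;
    msg.header = header;

    Eigen::Vector3f t = poseMatrix.block<3, 1>(0, 3);
    Eigen::Quaternionf q(poseMatrix.block<3, 3>(0, 0));
    q.normalize();  // guard against slight non-orthonormality in the PCA axes

    msg.pose.position.x = t.x();
    msg.pose.position.y = t.y();
    msg.pose.position.z = t.z();
    msg.pose.orientation.x = q.x();
    msg.pose.orientation.y = q.y();
    msg.pose.orientation.z = q.z();
    msg.pose.orientation.w = q.w();
    return msg;
}

Inside imageCallback, the result could then be published through a ros::Publisher advertised in main() via nh.advertise<geometry_msgs::PoseStamped>(...), stamped with cloud_msg->header so the pose is expressed in the point cloud's frame.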
