ROS2-DL on Ubuntu 20.04: Publishing Your Own Deep Learning Detection Results via ROS2 (Part 4) (ORB_SLAM3 and YOLO Communicating over ROS2)


Continuing from the previous post.

I. Modify the msg Format of the yolov4dm_message Package

Later testing showed that a list type was not a good fit, so the fields were changed to string.
Rebuild: colcon build
Refresh the environment: source ~/.bashrc
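For reference, a minimal sketch of what the revised Parkpose.msg in yolov4dm_message might look like after this change (the original file is only shown in a screenshot; the field names are inferred from the code below):

std_msgs/Header header
string obstacle
string parkspace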

II. Modify predict.py in the YOLOV4DM Package

import sys
# Make the local yolov4dm sources importable
sys.path.append('/home/stk/ros2work/ORB_SLAM3_Grid_Mapping/Examples/YOLOV4DM/src/yolov4dm/yolov4dm')

import rclpy
from rclpy.node import Node
from rclpy.clock import Clock
import cv2
from matplotlib import pyplot as plt
from PIL import Image as Images
from cv_bridge import CvBridge

from yolo import YOLO

from geometry_msgs.msg import PoseArray
from sensor_msgs.msg import Image
from yolov4dm_message.msg import Parkpose


class ImageSubscriber(Node):
    def __init__(self):
        super().__init__('image_subscriber')
        # Camera frames in, SLAM poses in, detection strings out
        self.subscription1 = self.create_subscription(Image, '/image_topic', self.image_callback, 10)
        self.cv_bridge = CvBridge()
        self.subscription2 = self.create_subscription(PoseArray, '/pts_and_pose', self.slam_callback, 1000)
        self.publisher_ = self.create_publisher(Parkpose, 'point_topic', 10)
        # Load the detector once; constructing YOLO() inside the callback would
        # reload the network weights on every frame
        self.yolo = YOLO()

    def image_callback(self, msg):
        cv2_img = self.cv_bridge.imgmsg_to_cv2(msg, 'bgr8')
        self.get_logger().info('Received image with dimensions: %s' % str(cv2_img.shape))
        msgss = Parkpose()
        # Sentinel values (2.2 for obstacles, 1.1 for parking spaces) are
        # published whenever detection produces nothing usable
        obstacle_list = []
        park_list = []
        r_image = None
        mode = "predict"
        if mode == "predict":
            try:
                # YOLO expects an RGB PIL image; keep the raw BGR frame for drawing
                image = Images.fromarray(cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB))
                img_read = cv2_img
            except Exception:
                obstacle_list.append(2.2)
                park_list.append(1.1)
            try:
                r_image, obstacle_list, park_list = self.yolo.detect_image(image, img_read, crop=False, count=False)
                # detect_image may return None instead of a list when nothing is found
                try:
                    len(obstacle_list)
                except TypeError:
                    obstacle_list = [2.2]
                try:
                    len(park_list)
                except TypeError:
                    park_list = [1.1]
            except Exception:
                # e.g. TypeError: cannot unpack non-iterable Image object
                obstacle_list.append(2.2)
                park_list.append(1.1)
        else:
            obstacle_list.append(2.2)
            park_list.append(1.1)

        msgss.header.stamp = Clock().now().to_msg()
        msgss.header.frame_id = "park_link"
        # The msg fields are strings, so each list is serialized as comma-separated values
        msgss.obstacle = ','.join(str(item1) for item1 in obstacle_list)
        msgss.parkspace = ','.join(str(item2) for item2 in park_list)
        self.publisher_.publish(msgss)
        if r_image is not None:
            # Note: plt.show() blocks this callback until the window is closed
            plt.imshow(r_image)
            plt.show()

    def slam_callback(self, msg):
        self.get_logger().info('Received yolov4dm topic: %s' % msg.poses)


def main(args=None):
    rclpy.init(args=args)
    image_subscriber = ImageSubscriber()
    rclpy.spin(image_subscriber)
    rclpy.shutdown()


if __name__ == '__main__':
    main()




Build: colcon build
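With the node running, the published detections can be spot-checked with the standard ROS2 CLI; every message on /point_topic should carry the comma-separated obstacle and parkspace strings and the park_link frame id:

ros2 topic echo /point_topic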

III. Modify the ORB_SLAM3 ROS2 Node

The ORB_SLAM3 here builds on my earlier post, "ROS2: Real-Time 2D Occupancy-Grid Mapping with a Monocular Camera Based on ORB_SLAM3".

1. Modify System.cc and System.h

In System.h, add the following declaration to the public section:

void Yolov4dm(string obstacle,string parkspace);

In System.cc, add the implementation (for now it simply prints the received strings; the ROS2 node below calls it whenever a detection message arrives):

void System::Yolov4dm(string obstacle,string parkspace)
{
    std::cout << "yolov4dm_obstacle:"<<obstacle<<std::endl;
    std::cout << "yolov4dm_parkspace:"<<parkspace<<std::endl;
}

Delete the build folder inside ORB_SLAM3_Grid_Mapping.

Rebuild ORB_SLAM3:

./build.sh

2. Create the yolo_mono Node

1) Create a yolov4dm-monocular folder under ORB_SLAM3_Grid_Mapping/Examples/ROS2/ORB_SLAM3/src.

2) Create and edit ros2_yolo_mono.cpp in that folder:
#include <iostream>
#include <algorithm>
#include <fstream>
#include <chrono>
#include <unistd.h>
#include <time.h>
#include <iomanip>
#include "rclcpp/rclcpp.hpp"
#include <cv_bridge/cv_bridge.h>
#include "sensor_msgs/msg/image.hpp"
#include "geometry_msgs/msg/pose_array.hpp"
#include "geometry_msgs/msg/pose_stamped.hpp"
#include "yolov4dm_message/msg/parkpose.hpp"

#include <Eigen/Dense>
#include <opencv2/core/eigen.hpp>

#include <opencv2/highgui/highgui_c.h>
#include <opencv2/highgui/highgui.hpp>
#include <Converter.h>
#include "System.h"
#include "Frame.h"
#include "Map.h"
#include "Tracking.h"
#include <opencv2/core/core.hpp>
#include "utility.hpp"
#include <math.h>
using namespace std;
using std::placeholders::_1;

using ImageMsg = sensor_msgs::msg::Image; 
using Yolov4dmMsg = yolov4dm_message::msg::Parkpose;

int all_pts_pub_gap = 0;
bool pub_all_pts = false;
int pub_count = 0;


class MonocularSlamNode : public rclcpp::Node
{
public:
    MonocularSlamNode(ORB_SLAM3::System& pSLAM);

    ~MonocularSlamNode();

//private:
    
    void GrabImage(const sensor_msgs::msg::Image::SharedPtr msg); 
    void GrabPose(cv::Mat Tcw);

    void PredictYolov4dm(const yolov4dm_message::msg::Parkpose::SharedPtr msg1);

    ORB_SLAM3::System& SLAM;
    
    int frame_id;
    rclcpp::Publisher<geometry_msgs::msg::PoseArray>::SharedPtr pub_pts_and_pose;
    rclcpp::Publisher<geometry_msgs::msg::PoseArray>::SharedPtr pub_all_kf_and_pts;
    rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr m_image_subscriber;
    rclcpp::Subscription<yolov4dm_message::msg::Parkpose>::SharedPtr m_yolov4dm_subscriber;

    //yolov4dm
    queue<Yolov4dmMsg::SharedPtr> yoloBuf_;
    std::mutex bufyolo_;
};

MonocularSlamNode::MonocularSlamNode(ORB_SLAM3::System& pSLAM)
    :   Node("ORB_SLAM3_ROS2"), SLAM(pSLAM), frame_id(0)
{
    m_image_subscriber = this->create_subscription<ImageMsg>("/image_raw", 1,
        std::bind(&MonocularSlamNode::GrabImage, this, std::placeholders::_1)); // d435i topic
    std::cout << "slam changed" << std::endl;
    m_yolov4dm_subscriber = this->create_subscription<Yolov4dmMsg>("/point_topic", 1,
        std::bind(&MonocularSlamNode::PredictYolov4dm, this, std::placeholders::_1));
    // Create the PoseArray publishers once here; creating them inside GrabPose
    // would allocate a new publisher on every frame
    pub_pts_and_pose = this->create_publisher<geometry_msgs::msg::PoseArray>("pts_and_pose", 1000);
    pub_all_kf_and_pts = this->create_publisher<geometry_msgs::msg::PoseArray>("all_kf_and_pts", 1000);
}

MonocularSlamNode::~MonocularSlamNode()
{
    // Stop all threads
    SLAM.Shutdown();

    // Save camera trajectory
    SLAM.SaveKeyFrameTrajectoryTUM("KeyFrameTrajectory.txt");
}

void MonocularSlamNode::GrabImage(const ImageMsg::SharedPtr msg)
{
    // Copy the ros image message to cv::Mat.
    cv_bridge::CvImagePtr m_cvImPtr;
    try
    {
        m_cvImPtr = cv_bridge::toCvCopy(msg);
    }
    catch (cv_bridge::Exception& e)
    {
        RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
        return;
    }

    //std::cout<<"one frame has been sent"<<std::endl;
    cv::Mat Tcw;

    Sophus::SE3f Tcw_SE3f = SLAM.TrackMonocular(m_cvImPtr->image, m_cvImPtr->header.stamp.sec+(m_cvImPtr->header.stamp.nanosec * pow(10,-9)));
    Eigen::Matrix4f Tcw_Matrix = Tcw_SE3f.matrix(); 
    cv::eigen2cv(Tcw_Matrix, Tcw);  
    GrabPose(Tcw);

    ++frame_id;
}

void MonocularSlamNode::PredictYolov4dm(const Yolov4dmMsg::SharedPtr msg1)
{
    std::lock_guard<std::mutex> lock(bufyolo_);
    yoloBuf_.push(msg1);
    // Reconstruct the detection timestamp in seconds
    float yolov4dmdata_time = msg1->header.stamp.sec + (msg1->header.stamp.nanosec * pow(10, -9));
    std::cout << "yolov4dm_time:" << setprecision(9) << yolov4dmdata_time << std::endl;
    // Hand the obstacle and parking-space strings to ORB_SLAM3
    SLAM.Yolov4dm(msg1->obstacle, msg1->parkspace);
}

void MonocularSlamNode::GrabPose(cv::Mat Tcw)
{
    // The PoseArray publishers are members created once in the constructor


    if (all_pts_pub_gap>0 && pub_count >= all_pts_pub_gap) {
        pub_all_pts = true;
        pub_count = 0;
    }

    if (pub_all_pts || SLAM.getLoopClosing()->loop_detected || SLAM.getTracker()->loop_detected) {

        pub_all_pts = SLAM.getTracker()->loop_detected = SLAM.getLoopClosing()->loop_detected = false;
        geometry_msgs::msg::PoseArray kf_pt_array;
        vector<ORB_SLAM3::KeyFrame*> key_frames = SLAM.getMap()->GetCurrentMap()->GetAllKeyFrames();
        //! placeholder for number of keyframes
        kf_pt_array.poses.push_back(geometry_msgs::msg::Pose());
        sort(key_frames.begin(), key_frames.end(), ORB_SLAM3::KeyFrame::lId);
        unsigned int n_kf = 0;
        for (auto key_frame : key_frames) {
            if (key_frame->isBad())
                continue;

            cv::Mat R;
            Eigen::Matrix3f RE = key_frame->GetPose().matrix3x4().block<3,3>(0,0).transpose();
            cv::eigen2cv(RE, R);

            vector<float> q = ORB_SLAM3::Converter::toQuaternion(R);
            cv::Mat twc;
            Eigen::Vector3f twcE= key_frame->GetCameraCenter();
            cv::eigen2cv(twcE,twc);
            geometry_msgs::msg::Pose kf_pose;

            kf_pose.position.x = twc.at<float>(0);
            kf_pose.position.y = twc.at<float>(1);
            kf_pose.position.z = twc.at<float>(2);
            kf_pose.orientation.x = q[0];
            kf_pose.orientation.y = q[1];
            kf_pose.orientation.z = q[2];
            kf_pose.orientation.w = q[3];
            kf_pt_array.poses.push_back(kf_pose);

            unsigned int n_pts_id = kf_pt_array.poses.size();
            //! placeholder for number of points
            kf_pt_array.poses.push_back(geometry_msgs::msg::Pose());
            std::set<ORB_SLAM3::MapPoint*> map_points = key_frame->GetMapPoints();
            unsigned int n_pts = 0;
            for (auto map_pt : map_points) {
                if (!map_pt || map_pt->isBad()) {
                    //printf("Point %d is bad\n", pt_id);
                    continue;
                }
                cv::Mat pt_pose;
                Eigen::Vector3f pt_poseE = map_pt->GetWorldPos();
                cv::eigen2cv(pt_poseE,pt_pose);
                if (pt_pose.empty()) {
                    //printf("World position for point %d is empty\n", pt_id);
                    continue;
                }
                geometry_msgs::msg::Pose curr_pt;
                //printf("wp size: %d, %d\n", wp.rows, wp.cols);
                //pcl_cloud->push_back(pcl::PointXYZ(wp.at<float>(0), wp.at<float>(1), wp.at<float>(2)));
                curr_pt.position.x = pt_pose.at<float>(0);
                curr_pt.position.y = pt_pose.at<float>(1);
                curr_pt.position.z = pt_pose.at<float>(2);
                kf_pt_array.poses.push_back(curr_pt);
                ++n_pts;
            }
            geometry_msgs::msg::Pose n_pts_msg;
            n_pts_msg.position.x = n_pts_msg.position.y = n_pts_msg.position.z = n_pts;
            kf_pt_array.poses[n_pts_id] = n_pts_msg;
            ++n_kf;
        }
        geometry_msgs::msg::Pose n_kf_msg;
        n_kf_msg.position.x = n_kf_msg.position.y = n_kf_msg.position.z = n_kf;
        kf_pt_array.poses[0] = n_kf_msg;
        kf_pt_array.header.frame_id = "1";
        //kf_pt_array.header.seq = frame_id + 1;
        printf("Publishing data for %u keyfranmes\n", n_kf);
        pub_all_kf_and_pts->publish(kf_pt_array);
    }
    else if (SLAM.getTracker()->mCurrentFrame.is_keyframe || !Tcw.empty()) {

        ++pub_count;
        SLAM.getTracker()->mCurrentFrame.is_keyframe = false;
        cv::Mat Rwc = Tcw.rowRange(0, 3).colRange(0, 3).t();
        cv::Mat twc = -Rwc*Tcw.rowRange(0, 3).col(3);
        vector<float> q = ORB_SLAM3::Converter::toQuaternion(Rwc);

        std::vector<ORB_SLAM3::MapPoint*> map_points = SLAM.GetTrackedMapPoints();
        int n_map_pts = map_points.size();

        geometry_msgs::msg::PoseArray pt_array;
        //pt_array.poses.resize(n_map_pts + 1);

        geometry_msgs::msg::Pose camera_pose;

        camera_pose.position.x =twc.at<float>(0);
        camera_pose.position.y =twc.at<float>(1);
        camera_pose.position.z =twc.at<float>(2);

        camera_pose.orientation.x =q[0];
        camera_pose.orientation.y =q[1];
        camera_pose.orientation.z =q[2];
        camera_pose.orientation.w =q[3];

        pt_array.poses.push_back(camera_pose);

        //printf("Done getting camera pose\n");

        for (int pt_id = 1; pt_id <= n_map_pts; ++pt_id){
            if (!map_points[pt_id - 1] || map_points[pt_id - 1]->isBad()) {
                //printf("Point %d is bad\n", pt_id);
                continue;
            }
            cv::Mat wp;
            Eigen::Vector3f wpE = map_points[pt_id - 1]->GetWorldPos();
            cv::eigen2cv(wpE,wp);

            if (wp.empty()) {
                //printf("World position for point %d is empty\n", pt_id);
                continue;
            }
            geometry_msgs::msg::Pose curr_pt;
            //printf("wp size: %d, %d\n", wp.rows, wp.cols);
            //pcl_cloud->push_back(pcl::PointXYZ(wp.at<float>(0), wp.at<float>(1), wp.at<float>(2)));
            curr_pt.position.x = wp.at<float>(0);
            curr_pt.position.y = wp.at<float>(1);
            curr_pt.position.z = wp.at<float>(2);
            pt_array.poses.push_back(curr_pt);
            //printf("Done getting map point %d\n", pt_id);
        }
        pt_array.header.frame_id = "1";
        pub_pts_and_pose->publish(pt_array);
        //pub_kf.publish(camera_pose);
    }

}



int main(int argc, char **argv)
{
    if(argc < 3)
    {
        std::cerr << "\nUsage: ros2 run orbslam yolo-mono path_to_vocabulary path_to_settings" << std::endl;
        return 1;
    }

    rclcpp::init(argc, argv);

    ORB_SLAM3::System SLAM(argv[1], argv[2], ORB_SLAM3::System::MONOCULAR, true);
    auto node = std::make_shared<MonocularSlamNode>(SLAM);
    
    std::cout << "============================ " << std::endl;
    rclcpp::spin(node);
    rclcpp::shutdown();

    return 0;
}
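The PoseArray published on all_kf_and_pts uses a packed layout: poses[0] encodes the keyframe count in its position fields, and each keyframe pose is followed by a placeholder pose encoding its map-point count and then the points themselves. A subscriber could walk it roughly like this (a sketch, not code from the original post):

// Sketch: decode the packed PoseArray produced by GrabPose above.
void DecodeKfPtArray(const geometry_msgs::msg::PoseArray &msg)
{
    size_t i = 0;
    // poses[0] stores the keyframe count (x == y == z)
    const unsigned int n_kf = static_cast<unsigned int>(msg.poses[i++].position.x);
    for (unsigned int kf = 0; kf < n_kf; ++kf) {
        const geometry_msgs::msg::Pose kf_pose = msg.poses[i++];   // keyframe camera pose
        const unsigned int n_pts =
            static_cast<unsigned int>(msg.poses[i++].position.x);  // point-count placeholder
        for (unsigned int p = 0; p < n_pts; ++p) {
            const geometry_msgs::msg::Pose &pt = msg.poses[i++];   // one map point (position only)
            (void)pt;  // consume kf_pose / pt here
        }
        (void)kf_pose;
    }
}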
3) Modify CMakeLists.txt

Add:

add_executable(yolo-mono
  src/yolov4dm-monocular/ros2_yolo_mono.cpp
)
ament_target_dependencies(yolo-mono rclcpp sensor_msgs geometry_msgs yolov4dm_message cv_bridge ORB_SLAM3 Pangolin)
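If it is not already present near the top of the CMakeLists.txt, yolov4dm_message also needs to be resolved with find_package, since ament_target_dependencies can only reference packages that have been found:

find_package(yolov4dm_message REQUIRED)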

Then, in the install() section further down, add the yolo-mono executable:

install(TARGETS 
  mono 
  yolo-mono
  mono-inertial
  rgbd 
  stereo 
  stereo-inertial
  Monopub
  Mono-inertial_pub
  Mono-example_pub
  Mapsub
  DESTINATION lib/${PROJECT_NAME})
4) Modify package.xml

Add:

<depend>yolov4dm_message</depend>
5) Build with colcon build

6) Create a script, yolov4dm_slam3_ros2.sh, that launches the multiple nodes together (the original post shows the script only as screenshots; a sketch follows).
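A minimal sketch of what such a script might look like is given below. The package and executable names (yolov4dm predict, orbslam yolo-mono) and all paths are assumptions based on the code and CMake target above; adapt them to your workspace:

#!/bin/bash
# Launch the YOLOv4 detection node and the ORB_SLAM3 yolo-mono node together.
# Package/executable names and paths are assumptions -- adjust to your setup.

# Overlay the workspaces containing yolov4dm, yolov4dm_message and the SLAM node
source ~/ros2work/install/setup.bash

# Detection node: subscribes to the image topic, publishes /point_topic
ros2 run yolov4dm predict &

sleep 3

# SLAM node: subscribes to /image_raw and /point_topic
VOC=~/ros2work/ORB_SLAM3_Grid_Mapping/Vocabulary/ORBvoc.txt      # adjust
SETTINGS=/path/to/your_camera_settings.yaml                      # adjust
ros2 run orbslam yolo-mono "$VOC" "$SETTINGS"

wait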

7) Run:
./yolov4dm_slam3_ros2.sh

Result: the terminal output shows that SLAM3 received both the parking-space coordinate information and the obstacle coordinate information.
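Based on the cout statements added in System.cc and the ROS2 node, each received detection message produces output of roughly this shape (the 2.2/1.1 values are the sentinel placeholders published when nothing is detected):

yolov4dm_time:<stamp>
yolov4dm_obstacle:2.2
yolov4dm_parkspace:1.1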

(Of course, the image topic in the yolov4dm package's predict.py can be changed to the topic your camera actually publishes; it was left unchanged here only to verify that SLAM3 can receive the detected parking-space and obstacle information.)
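For example, if the camera publishes on /image_raw (the topic the SLAM node above already subscribes to), the change in predict.py is a single line:

self.subscription1 = self.create_subscription(Image, '/image_raw', self.image_callback, 10)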

------------------------ Changing the image topic in predict.py to the camera's topic ------------------------
The result: since there are no parking spaces in the scene, SLAM only receives obstacle information intermittently.
