Local Video Detection with YOLO in ROS
Creating the ROS package
Here we assume a catkin workspace has already been created.
If not, run the following steps (skip them if your workspace already exists):
1. First, create the workspace directory:
mkdir -p ./catkin_ws/src
2. Run the following in the workspace root directory (catkin_ws):
catkin_make
3. Create the package inside the src directory (if the workspace already exists, you can start directly from this step):
catkin_create_pkg local_vedio_detect std_msgs rospy roscpp
Here local_vedio_detect is the package name (see the note after these steps about additional dependencies).
For these three steps, see the ROS wiki tutorial on creating a package.
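The nodes written below also use cv_bridge, image_transport and sensor_msgs. If you like, you can declare those dependencies when creating the package; a variant of the command above, assuming the same package name (otherwise add them to package.xml and CMakeLists.txt later):
catkin_create_pkg local_vedio_detect std_msgs rospy roscpp cv_bridge image_transport sensor_msgs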
The topic model
Creating vediopub.cpp
Create a file named vediopub.cpp in the src directory of local_vedio_detect.
The content of vediopub.cpp is as follows:
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <image_transport/image_transport.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    // Initialize the ROS node
    ros::init(argc, argv, "image_publisher");
    ros::NodeHandle n;
    ros::Rate loop_rate(5);   // publish at 5 Hz

    // Set up image_transport and advertise the image topic
    image_transport::ImageTransport it(n);
    image_transport::Publisher pub = it.advertise("/camera/image_raw", 1);
    sensor_msgs::ImagePtr msg;

    // Open the local video file with OpenCV (change this path to your own video)
    cv::VideoCapture video;
    video.open("/home/winston/catkin_ws/src/local_vedio_detect/data/11.mp4");
    if (!video.isOpened())
    {
        ROS_INFO("Read Video failed!\n");
        return 0;
    }

    Mat frame;
    int count = 0;
    while (ros::ok())
    {
        video >> frame;
        if (frame.empty())
            break;
        count++;
        // Convert the OpenCV Mat to a ROS image message and publish it
        msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", frame).toImageMsg();
        pub.publish(msg);
        ROS_INFO("read the %dth frame successfully!", count);
        loop_rate.sleep();
        ros::spinOnce();
    }
    video.release();
    return 0;
}
Note that in this part:
// Set up image_transport and advertise the image topic
image_transport::ImageTransport it(n);
image_transport::Publisher pub = it.advertise("/camera/image_raw", 1);
sensor_msgs::ImagePtr msg;
the topic /camera/image_raw must match the topic configured under camera_reading in darknet_ros's ros.yaml.
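For reference, the relevant section of darknet_ros/config/ros.yaml looks roughly like the snippet below; the exact default (often /camera/rgb/image_raw) depends on the darknet_ros version, so set the topic to whatever vediopub publishes:
subscribers:
  camera_reading:
    topic: /camera/image_raw
    queue_size: 1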
Creating vediosub.cpp
Its content is as follows:
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <image_transport/image_transport.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>

// Callback: convert the received image message back to an OpenCV image and display it
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    ROS_INFO("Received \n");
    try
    {
        cv::imshow("video", cv_bridge::toCvShare(msg, "bgr8")->image);
        cv::waitKey(30);
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
    }
}

int main(int argc, char** argv)
{
    ros::init(argc, argv, "image_listener");
    ros::NodeHandle n;
    cv::namedWindow("video");
    cv::startWindowThread();
    image_transport::ImageTransport it(n);
    // Subscribe to the same topic that vediopub publishes
    image_transport::Subscriber sub = it.subscribe("/camera/image_raw", 1, imageCallback);
    ros::spin();
    cv::destroyWindow("video");
    return 0;
}
Modifying CMakeLists.txt
CMakeLists.txt is located in the local_vedio_detect package directory. The file generated by catkin_create_pkg is mostly comments; we make a few changes on top of it.
First, find the existing find_package(catkin REQUIRED COMPONENTS ...) block and replace it with:
find_package(catkin REQUIRED COMPONENTS
cv_bridge
image_transport
roscpp
rospy
sensor_msgs
std_msgs
)
find_package(OpenCV REQUIRED)
Then find the include_directories(...) line and replace it, together with the build rules near the end of the file, with:
include_directories(include ${catkin_INCLUDE_DIRS})
include_directories(include ${OpenCV_INCLUDE_DIRS})
add_executable(vediopub src/vediopub.cpp)
target_link_libraries(vediopub ${catkin_LIBRARIES} ${OpenCV_LIBRARIES})
add_executable(vediosub src/vediosub.cpp)
target_link_libraries(vediosub ${catkin_LIBRARIES} ${OpenCV_LIBRARIES})
Run catkin_make
Go back to the catkin_ws root and run catkin_make. If nothing unexpected happens, the build succeeds. If catkin_make reports that cv_bridge or image_transport cannot be found, make sure those packages are installed and listed as dependencies in package.xml.
Running
The last step is to run the nodes.
First, open a terminal in the catkin_ws directory and start the ROS master:
roscore
Then open another terminal, source the workspace, and start the publisher node:
source devel/setup.bash
rosrun local_vedio_detect vediopub
If the terminal keeps printing messages like "read the ...th frame successfully!", the publisher node is running correctly.
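To check the raw video stream itself, you can also start the subscriber node in another (sourced) terminal; it opens a window that displays the published frames:
rosrun local_vedio_detect vediosub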
Calling darknet_ros
To see detection results on the video, we need to call darknet_ros.
darknet_ros must be cloned and built in the same catkin workspace; see the darknet_ros repository on GitHub (leggedrobotics/darknet_ros) for its build instructions.
Running it
Open a terminal in the catkin_ws directory (source devel/setup.bash first) and run:
roslaunch darknet_ros darknet_ros.launch
Then, in another terminal, start the video publisher:
rosrun local_vedio_detect vediopub
Once both are running, darknet_ros shows the YOLO detection results (bounding boxes and class labels) drawn on the video frames.
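Besides the detection window, darknet_ros also publishes its results on topics. A quick way to inspect them from another terminal (topic names may differ slightly between darknet_ros versions):
rostopic list | grep darknet_ros
rostopic echo /darknet_ros/bounding_boxes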
If you run into any problems, leave a comment below and I'll reply when I see it.
Reference blog:
ROS中订阅和发布视频中的图像消息 (Publishing and subscribing to image messages from a video in ROS)