根据RGB信息定位点云中的点
写在前面
- 本人从事机器视觉细分的3D相机行业。编写此系列文章主要目的有:
- 1、便利他人应用3D相机,本系列文章包含公司所出售相机的SDK的使用例程及详细注释;
- 2、促进行业发展及交流。
- 知微传感Dkam系列3D相机可以应用于定位分拣、焊接引导、逆向建模、检测测量等领域
- 欢迎与我深入交流:微信号:liu_zhisensor
流程及API说明
简要说明
- 知微传感Dkam系列3D相机中多数型号都具备输出RGB信息的功能
- 用户在使用3D相机时,往往需要RGB信息协助,通过颜色特征提取目标物体的点云
- 本例程示意如何通过RGB图中像素的位置来找到对应位置的点,核心思路是按照序号查找点云中的点。例如,3D相机的RGB图的分辨率是1944×2592(1944行×2592列),点云的分辨率是1280×1024,可以将点云按照RGB进行重排;通过对RGB信息的处理,认定想要提取第n行、第m列像素对应的点,该点即是重排后点云中的第(2592×(n-1)+m)个点(按行优先排列,每行含2592个点,故步长为列数2592)
- 点云的分辨率和RGB的分辨率不同,按照RGB对点云进行重排后点云的分辨率与RGB相同,也因此会出现大量坐标为0的点
- 按照RGB对点云进行重排后点云的坐标系不会发生变化
根据RGB信息定位点云中点的流程
相关API
- RawdataToRgb888 将原始RGB数据转为RGB888
- void RawdataToRgb888(Camera_Object_C* camera_obj, PhotoInfo *rgb_data)
- 函数功能:根据 RGB 原始数据转换为 RGB888 图像数据
- 参数:camera_obj:相机的结构体指针; rgb_data:获取的 RGB 数据
- 返回值:无
- Fusion3DToRGB 根据RGB重排点云
- int Fusion3DToRGB(Camera_Object_C* camera_obj, PhotoInfo *rgb_data, PhotoInfo *raw_data, PhotoInfo *xyz)
- 函数功能:根据 RGB 数据重新排列点云
- 参数:camera_obj:相机的结构体指针; rgb_data:获取的 RGB 数据; raw_data: 获取的点云数据;xyz:最终重排之后的点云数据
- 返回值:0:重排成功;非 0:重排失败
例程及注释
- 本例程基于WIN10+VisualStudio2019+DkamSDK_1.6.71,采用C++语言
- 本例使用的第三方库有PCL1.12,OpenCV4.5.4,第三方库的配置方法另请查阅
- DkamSDK的配置方法请参考SDK说明书
- 本例程在D330XS型相机上验证
#include <iostream>
#include<cstring>
//DkamSDK
#include"dkam_discovery.h"
#include"dkam_gige_camera.h"
#include"dkam_gige_stream.h"
//PCL
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>
//OpenCV
#include <opencv2/opencv.hpp>
int main()
{
    std::cout << "Hello ZhiSENSOR!" << std::endl;
    std::cout << "Hello liu_zhisensor!" << std::endl;
    std::vector<DiscoveryInfo> discovery_info;
    Discovery discovery;
    GigeCamera camera;
    GigeStream* pointgigestream = NULL;
    GigeStream* rgbgigestream = NULL;
    //**********************************************Discover cameras****************************************************
    // Enumerate every Dkam 3D camera on the local network.
    std::vector<DiscoveryInfo>().swap(discovery_info);
    int camer_num = discovery.DiscoverCamera(&discovery_info);
    std::cout << "局域网内共有" << camer_num << "台相机" << std::endl;
    // Print the IP of every camera that was found.
    for (int i = 0; i < camer_num; i++)
    {
        std::cout << "局域网内相机的IP为:" << discovery.ConvertIpIntToString(discovery_info[i].camera_ip) << std::endl;
    }
    //**********************************************Connect camera****************************************************
    // Pick the camera whose IP matches the expected address.
    int k = -1;
    for (int i = 0; i < camer_num; i++)
    {
        if (strcmp(discovery.ConvertIpIntToString(discovery_info[i].camera_ip), "169.254.47.100") == 0)
        {
            k = i;
            std::cout << "将连接第" << k + 1 << "台相机" << std::endl;
            break; // first match wins
        }
    }
    // BUGFIX: the original code printed the "not found" message once per
    // non-matching camera and then, when nothing matched (k == -1), indexed
    // discovery_info[-1] — undefined behavior. Report once and bail out instead.
    if (k == -1)
    {
        std::cout << "局域网内无该IP的相机" << std::endl;
        return -1;
    }
    // Connect to the selected camera; 0 means success.
    int connect = camera.CameraConnect(&discovery_info[k]);
    if (connect == 0)
    {
        std::cout << "成功连接相机" << std::endl;
    }
    else
    {
        std::cout << "连接相机失败,请检查!!!" << std::endl;
    }
    //**********************************************Configure camera****************************************************
    if (connect == 0)
    {
        // Query the IR sensor resolution (channel 0 = IR / point cloud).
        int width = -1;
        int height = -1;
        std::cout << "获取相机红外图的宽和高。。。" << std::endl;
        int height_gray = camera.GetCameraHeight(&height, 0);
        int width_gray = camera.GetCameraWidth(&width, 0);
        std::cout << "相机红外图的宽为:" << width << std::endl;
        std::cout << "相机红外图的高为:" << height << std::endl;
        // Query the RGB sensor resolution (channel 1 = RGB);
        // not available on models without an RGB sensor.
        int width_RGB = -1;
        int height_RGB = -1;
        std::cout << "获取相机RGB图的宽和高。。。" << std::endl;
        int height_rgb = camera.GetCameraHeight(&height_RGB, 1);
        int width_rgb = camera.GetCameraWidth(&width_RGB, 1);
        std::cout << "相机RGB图的宽为" << width_RGB << std::endl;
        std::cout << "相机RGB图的高为" << height_RGB << std::endl;
        // Point-cloud buffer: 6 bytes per point (presumably 3 coords x 2 bytes
        // — TODO confirm against the SDK manual).
        PhotoInfo* point_data = new PhotoInfo;
        point_data->pixel = new char[width * height * 6];
        memset(point_data->pixel, 0, width * height * 6);
        // RGB buffer: 3 bytes per pixel (RGB888 after conversion).
        PhotoInfo* RGB_data = new PhotoInfo;
        RGB_data->pixel = new char[width_RGB * height_RGB * 3];
        memset(RGB_data->pixel, 0, width_RGB * height_RGB * 3);
        //**********************************************Open data channels****************************************************
        // Stream channel ids: 0 = IR, 1 = point cloud, 2 = RGB.
        // Point-cloud channel.
        int stream_point = camera.StreamOn(1, &pointgigestream);
        if (stream_point == 0)
        {
            std::cout << "点云通道打开成功!" << std::endl;
        }
        else
        {
            std::cout << "点云通道打开失败!!!" << std::endl;
        }
        // RGB channel.
        int stream_RGB = camera.StreamOn(2, &rgbgigestream);
        if (stream_RGB == 0)
        {
            std::cout << "RGB图通道打开成功!" << std::endl;
        }
        else
        {
            std::cout << "RGB图通道打开失败!!!" << std::endl;
        }
        // Start acquisition on the camera side.
        int acquistion = camera.AcquisitionStart();
        if (acquistion == 0)
        {
            std::cout << "可以开始接受数据!" << std::endl;
        }
        // Drop any stale frames left in the stream buffers.
        pointgigestream->FlushBuffer();
        rgbgigestream->FlushBuffer();
        //**********************************************Wait for the camera to upload data***************************************
        // Capture one point-cloud frame (timeout in microseconds).
        int capturePoint = -1;
        capturePoint = pointgigestream->TimeoutCapture(point_data, 4000000);
        if (capturePoint == 0)
        {
            std::cout << "点云接收成功!" << std::endl;
        }
        else
        {
            std::cout << "点云接收失败!!!" << std::endl;
            std::cout << "失败代号:" << capturePoint << std::endl;
        }
        // Capture one RGB frame.
        int captureRGB = -1;
        captureRGB = rgbgigestream->TimeoutCapture(RGB_data, 4000000);
        if (captureRGB == 0)
        {
            std::cout << "RGB接收成功!" << std::endl;
        }
        else
        {
            std::cout << "RGB接收失败!!!" << std::endl;
            std::cout << "失败代号:" << captureRGB << std::endl;
        }
        //**********************************************Convert data to an OpenCV-processable format***************************************
        std::cout << "将RGB图转换到OpenCV格式..." << std::endl;
        // Convert the raw sensor data to RGB888, then wrap it (no copy) in a Mat.
        camera.RawdataToRgb888(*RGB_data);
        cv::Mat RGBimage = cv::Mat(height_RGB, width_RGB, CV_8UC3, (void*)RGB_data->pixel);
        // OpenCV expects BGR channel order.
        cv::cvtColor(RGBimage, RGBimage, cv::COLOR_RGB2BGR);
        //cv::imshow("RGB Picture", RGBimage);
        cv::imwrite("RGBImage.bmp", RGBimage);
        //**********************************************Extract target pixels with OpenCV***************************************
        std::cout << "利用OpenCV提取目标像素位置..." << std::endl;
        // This example segments a green apple model by color.
        // HSV is more robust than BGR for color thresholding.
        cv::Mat HSV;
        cv::cvtColor(RGBimage, HSV, cv::COLOR_BGR2HSV);
        // Green in OpenCV's HSV ranges: H 50-70, S 43-255, V 46-255.
        cv::Scalar Lower(50, 43, 46);
        cv::Scalar Upper(70, 255, 255);
        // Binary mask of the pixels inside the color range.
        cv::Mat mask;
        cv::inRange(HSV, Lower, Upper, mask);
        // Collect the (0-based) coordinates of every masked pixel.
        std::vector<cv::Point> pixel;
        cv::findNonZero(mask, pixel);
        /*
        // Dump the extracted pixel coordinates.
        for (int i = 0; i < pixel.size(); i++)
        {
            int x = pixel[i].x;
            int y = pixel[i].y;
            std::cout << "x:" << x << ";" << "y:" << y << std::endl;
        }
        */
        // Save the masked image so the segmentation can be checked visually.
        cv::Mat out;
        RGBimage.copyTo(out, mask);
        //cv::imshow("out", out);
        cv::imwrite("apple.bmp", out);
        //****************************************Re-arrange the point cloud to match the RGB image**********************************
        std::cout << "按照RGB重排点云..." << std::endl;
        // Buffer for the re-arranged cloud; its resolution equals the RGB image's.
        PhotoInfo* xyz_data = new PhotoInfo;
        xyz_data->pixel = new char[RGB_data->pixel_width * RGB_data->pixel_height * 6];
        memset(xyz_data->pixel, 0, RGB_data->pixel_width * RGB_data->pixel_height * 6);
        // Re-arrange; 0 means success.
        int FusionTag = -1;
        FusionTag = camera.Fusion3DToRGB(*RGB_data, *point_data, *xyz_data);
        if (FusionTag == 0)
        {
            std::cout << "点云重排成功!" << std::endl;
        }
        else
        {
            std::cout << "点云重排失败!!!" << std::endl;
            std::cout << "失败代号:" << FusionTag << std::endl;
        }
        //****************************************Convert the re-arranged cloud to a PCL-processable format**********************************
        std::cout << "将重排后点云数据转化为PCL可处理..." << std::endl;
        // 3 floats (x, y, z) per point.
        float* cloud = (float*)malloc(xyz_data->pixel_height * xyz_data->pixel_width * 3 * sizeof(float));
        memset((void*)cloud, 0, xyz_data->pixel_height * xyz_data->pixel_width * 3 * sizeof(float));
        camera.Convert3DPointFromCharToFloat(*xyz_data, cloud);
        // Organized cloud: width/height mirror the RGB resolution, so a pixel at
        // (row, col) maps to point index row * width + col.
        pcl::PointCloud<pcl::PointXYZ>::Ptr PointCloud_PCL(new pcl::PointCloud<pcl::PointXYZ>);
        PointCloud_PCL->width = xyz_data->pixel_width;
        PointCloud_PCL->height = xyz_data->pixel_height;
        PointCloud_PCL->points.resize(PointCloud_PCL->width * PointCloud_PCL->height);
        for (size_t i = 0; i < PointCloud_PCL->points.size(); ++i)
        {
            PointCloud_PCL->points[i].x = cloud[3 * i + 0];
            PointCloud_PCL->points[i].y = cloud[3 * i + 1];
            PointCloud_PCL->points[i].z = cloud[3 * i + 2];
        }
        // Save the full re-arranged cloud.
        pcl::io::savePCDFile("PointCloud_PCL.pcd", *PointCloud_PCL);
        /*
        //======================Color the re-arranged cloud by memory position to verify alignment======================
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr PointCloudRGB(new pcl::PointCloud<pcl::PointXYZRGB>);
        PointCloudRGB->width = PointCloud_PCL->width;
        PointCloudRGB->height = PointCloud_PCL->height;
        PointCloudRGB->points.resize(PointCloudRGB->width * PointCloudRGB->height);
        for (size_t i = 0; i < PointCloud_PCL->points.size(); ++i)
        {
            PointCloudRGB->points[i].x = PointCloud_PCL->points[i].x;
            PointCloudRGB->points[i].y = PointCloud_PCL->points[i].y;
            PointCloudRGB->points[i].z = PointCloud_PCL->points[i].z;
            PointCloudRGB->points[i].r = RGB_data->pixel[3 * i + 2];
            PointCloudRGB->points[i].g = RGB_data->pixel[3 * i + 1];
            PointCloudRGB->points[i].b = RGB_data->pixel[3 * i + 0];
        }
        pcl::io::savePCDFile("PointCloudRGB.pcd", *PointCloudRGB);
        //======================================end======================================
        */
        //****************************************Extract points by RGB pixel position**********************************
        std::cout << "按照RGB像素位置提取点云..." << std::endl;
        pcl::PointCloud<pcl::PointXYZ>::Ptr PointCloud_EX(new pcl::PointCloud<pcl::PointXYZ>);
        // Unorganized cloud: PCL convention is width = point count, height = 1.
        PointCloud_EX->width = static_cast<uint32_t>(pixel.size());
        PointCloud_EX->height = 1;
        PointCloud_EX->points.resize(PointCloud_EX->width * PointCloud_EX->height);
        for (size_t i = 0; i < pixel.size(); i++)
        {
            // BUGFIX: cv::findNonZero returns 0-based coordinates, so the
            // row-major index is y * width + x. The original
            // "(y - 1) * width + x" read one row too high and produced a
            // negative (out-of-bounds) index for pixels in row 0.
            const size_t idx = static_cast<size_t>(pixel[i].y) * width_RGB + pixel[i].x;
            PointCloud_EX->points[i].x = PointCloud_PCL->points[idx].x;
            PointCloud_EX->points[i].y = PointCloud_PCL->points[idx].y;
            PointCloud_EX->points[i].z = PointCloud_PCL->points[idx].z;
        }
        // Save the extracted (apple) points.
        pcl::io::savePCDFile("apple.pcd", *PointCloud_EX);
        //****************************************Visualize the extracted cloud**********************************
        std::cout << "显示提取的点云..." << std::endl;
        pcl::visualization::PCLVisualizer::Ptr Viewer(new pcl::visualization::PCLVisualizer("3D Viewer"));
        Viewer->addPointCloud<pcl::PointXYZ>(PointCloud_EX, "cloud");
        Viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "cloud");
        Viewer->addCoordinateSystem(1.0);
        Viewer->initCameraParameters();
        // spin() blocks until the viewer window is closed.
        while (!Viewer->wasStopped())
        {
            Viewer->spin();
        }
        //**********************************************Teardown***************************************
        // Release buffers in reverse order of creation.
        delete[] point_data->pixel;
        delete point_data;
        delete[] RGB_data->pixel;
        delete RGB_data;
        RGBimage.release();
        HSV.release();
        mask.release();
        out.release();
        delete[] xyz_data->pixel;
        delete xyz_data;
        free(cloud);
        // Close the stream channels and disconnect.
        int streamoff_point = camera.StreamOff(1, pointgigestream);
        int streamoff_rgb = camera.StreamOff(2, rgbgigestream);
        int disconnect = camera.CameraDisconnect();
        std::cout << "工作结束!!!!!!" << std::endl;
    }
    cv::waitKey(0);
    return 0;
}
运行结果
-
3D相机采集的RGB数据
-
提取目标像素结果
-
按照RGB重排后的点云
-
按照RGB像素位置提取的点云
-
以上结果可以看出,可以利用按照RGB重排点云来实现按照RGB像素位置提取对应点的功能
后记
- 知微传感Dkam系列3D相机可以应用于定位分拣、焊接引导、逆向建模、检测测量等领域
- 如有问题,欢迎与我深入交流:微信号:liu_zhisensor