根据RGB信息定位点云中的点:点云RGB融合
写在前面
- 本人从事机器视觉细分的3D相机行业。编写此系列文章主要目的有:
- 1、便利他人应用3D相机,本系列文章包含公司所出售相机的SDK的使用例程及详细注释;
- 2、促进行业发展及交流。
- 知微传感Dkam系列3D相机可以应用于定位分拣、焊接引导、逆向建模、检测测量等领域
- 欢迎与我深入交流:微信号:liu_zhisensor
流程及API说明
简要说明
- 知微传感Dkam系列3D相机中多数型号都具备输出RGB信息的功能
- 用户在使用3D相机时,往往需要RGB信息协助,通过颜色特征提取目标物体的点云
- 本例程示意如何通过RGB图中像素的位置来找到对应位置的点,核心思路是利用官方提供的融合点云和RGB的API得到融合后的数据,在该数据中提取RGB信息组成新的RGB数据,再对该RGB数据进行处理,得到想要的像素位置,提取对应位置的点
- 点云和RGB融合后分辨率与点云相同,无效点没有对应的RGB信息
- 融合后再提取RGB在步骤上比利用外参直接对比原始RGB数据和点云麻烦,但却避免了无效点的影响
根据RGB信息定位点云中点的流程
相关API
- RawdataToRgb888 将原始RGB数据转为RGB888
- void RawdataToRgb888(Camera_Object_C* camera_obj, PhotoInfo *rgb_data)
- 函数功能:根据 RGB 原始数据转换为 RGB888 图像数据
- 参数:camera_obj:相机的结构体指针; rgb_data:获取的 RGB 数据
- 返回值:无
- FusionImageTo3D 融合点云和RGB
- int FusionImageTo3D(PhotoInfo &image_data, PhotoInfo &raw_data, float * image_cloud)
- 函数功能: 将采集到的点云数据和红外或者 RGB 数据融合
- 参 数: image_data:采集的 RGB 或者红外数据;raw_data:采集的点云数据; image_cloud:点云融合之后的数据
- 返回值: 0:成功 ;非 0:失败
例程及注释
- 本例程基于WIN10+VisualStudio2019+DkamSDK_1.6.71,采用C++语言
- 本例使用OpenCV4.5.4,第三方库的配置方法另请查阅
- DkamSDK的配置方法请参考SDK说明书
- 本例程在D330XS型相机上验证
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
//DkamSDK
#include"dkam_discovery.h"
#include"dkam_gige_camera.h"
#include"dkam_gige_stream.h"
//OpenCV
#include <opencv2/opencv.hpp>
int main()
{
std::cout << "Hello ZhiSENSOR!" << std::endl;
std::cout << "微信号:liu_zhisensor!" << std::endl;
std::vector<DiscoveryInfo> discovery_info;
Discovery discovery;
GigeCamera camera;
GigeStream* pointgigestream = NULL;
GigeStream* graygigestream = NULL;
GigeStream* rgbgigestream = NULL;
//**********************************************查询相机****************************************************
//查询局域网内的3D相机
std::vector<DiscoveryInfo>().swap(discovery_info);
int camer_num = discovery.DiscoverCamera(&discovery_info);
std::cout << "局域网内共有" << camer_num << "台相机" << std::endl;
//显示局域网内相机的IP
for (int i = 0; i < camer_num; i++)
{
std::cout << "局域网内相机的IP为:" << discovery.ConvertIpIntToString(discovery_info[i].camera_ip) << std::endl;
}
//**********************************************连接相机****************************************************
//选定相机
int k = -1;
for (int i = 0; i < camer_num; i++)
{
if (strcmp((discovery.ConvertIpIntToString(discovery_info[i].camera_ip)), "169.254.47.100") == 0)
{
k = i;
std::cout << "将连接第" << k + 1 << "台相机" << std::endl;
}
else
{
std::cout << "局域网内无该IP的相机" << std::endl;
}
}
//连接相机
int connect = camera.CameraConnect(&discovery_info[k]);
if (connect == 0)
{
std::cout << "成功连接相机" << std::endl;
}
else
{
std::cout << "连接相机失败,请检查!!!" << std::endl;
}
//**********************************************配置相机****************************************************
if (connect == 0)
{
//获取当前红外相机的宽和高
int width = -1;
int height = -1;
std::cout << "获取相机红外图的宽和高。。。" << std::endl;
int height_gray = camera.GetCameraHeight(&height, 0);
int width_gray = camera.GetCameraWidth(&width, 0);
std::cout << "相机红外图的宽为:" << width << std::endl;
std::cout << "相机红外图的高为:" << height << std::endl;
//获取当前RGB相机的宽和高,如相机不支持则无此项
int width_RGB = -1;
int height_RGB = -1;
std::cout << "获取相机RGB图的宽和高。。。" << std::endl;
int height_rgb = camera.GetCameraHeight(&height_RGB, 1);
int width_rgb = camera.GetCameraWidth(&width_RGB, 1);
std::cout << "相机RGB图的宽为" << width_RGB << std::endl;
std::cout << "相机RGB图的高为" << height_RGB << std::endl;
//定义点云数据大小
PhotoInfo* point_data = new PhotoInfo;
point_data->pixel = new char[width * height * 6];
memset(point_data->pixel, 0, width * height * 6);
//定义RGB数据大小
PhotoInfo* RGB_data = new PhotoInfo;
RGB_data->pixel = new char[width_RGB * height_RGB * 3];
memset(RGB_data->pixel, 0, width_RGB * height_RGB * 3);
//**********************************************打开数据通道****************************************************
//开启数据流通道(0:红外 1:点云 2:RGB)
//点云
int stream_point = camera.StreamOn(1, &pointgigestream);
if (stream_point == 0)
{
std::cout << "点云通道打开成功!" << std::endl;
}
else
{
std::cout << "点云通道打开失败!!!" << std::endl;
}
//RGB图通道
int stream_RGB = camera.StreamOn(2, &rgbgigestream);
if (stream_RGB == 0)
{
std::cout << "RGB图通道打开成功!" << std::endl;
}
else
{
std::cout << "RGB图通道打开失败!!!" << std::endl;
}
//开始接受数据
int acquistion = camera.AcquisitionStart();
if (acquistion == 0)
{
std::cout << "可以开始接受数据!" << std::endl;
}
//刷新缓冲区数据
pointgigestream->FlushBuffer();
rgbgigestream->FlushBuffer();
//**********************************************等待相机上传数据***************************************
//采集点云
int capturePoint = -1;
capturePoint = pointgigestream->TimeoutCapture(point_data, 4000000);
if (capturePoint == 0)
{
std::cout << "点云接收成功!" << std::endl;
}
else
{
std::cout << "点云接收失败!!!" << std::endl;
std::cout << "失败代号:" << capturePoint << std::endl;
}
//采集RGB
int captureRGB = -1;
captureRGB = rgbgigestream->TimeoutCapture(RGB_data, 4000000);
if (captureRGB == 0)
{
std::cout << "RGB接收成功!" << std::endl;
}
else
{
std::cout << "RGB接收失败!!!" << std::endl;
std::cout << "失败代号:" << captureRGB << std::endl;
}
//保存RGB数据
int saveRGB = camera.SaveToBMP(*RGB_data,(char *)"RGBdata.bmp");
//**********************************************融合点云和RGB***************************************
//开辟内存存放融合后数据
float* rgb_cloud = new float[width * height * 6 * sizeof(float)];
memset(rgb_cloud, 0, width * height * 6);
//融合
int FusionTag = -1;
FusionTag = camera.FusionImageTo3D(*RGB_data,*point_data,rgb_cloud);
if (FusionTag == 0)
{
std::cout << "RGB点云融合成功!" << std::endl;
}
else
{
std::cout << "RGB点云融合失败!!!" << std::endl;
std::cout << "失败代号:" << FusionTag << std::endl;
}
//保存以方便查看融合结果
int savePointRGB = camera.SavePointCloudWithImageToTxt(*point_data, rgb_cloud, (char*)"pc-rgb.txt");
//**********************************************提取RGB***************************************
std::cout << "提取RGB..." << std::endl;
PhotoInfo* RGB_EX = new PhotoInfo;
RGB_EX->pixel = new char[width * height * 3];
memset(RGB_EX->pixel, 0, width * height * 3);
for (size_t i = 0; i < width * height; i++)
{
RGB_EX->pixel[3 * i + 0] = rgb_cloud[6 * i + 3];
RGB_EX->pixel[3 * i + 1] = rgb_cloud[6 * i + 4];
RGB_EX->pixel[3 * i + 2] = rgb_cloud[6 * i + 5];
}
//**********************************************将数据OpenCV可处理格式***************************************
//RGB图
std::cout << "将提取后RGB转换到OpenCV格式..." << std::endl;
camera.RawdataToRgb888(*RGB_EX);
cv::Mat RGBimage = cv::Mat(height, width, CV_8UC3, (void*)RGB_EX->pixel);
cv::cvtColor(RGBimage,RGBimage,cv::COLOR_RGB2BGR);
//cv::imshow("RGB Picture", RGBimage);
cv::imwrite("RGBImage.bmp",RGBimage);
//**********************************************利用OpenCV提取目标像素位置***************************************
//本例中提取含有芒果的像素,颜色为黄色
//将RGB信息转换为HSV颜色空间
cv::Mat HSV;
cv::cvtColor(RGBimage,HSV,cv::COLOR_BGR2HSV);
//定义颜色范围,本例中黄色色的HSV分别为H:20-45,S:43-255,V:46-255
cv::Scalar Lower(20,43,46);
cv::Scalar Upper(45,255,255);
//利用cvInRanges()获取颜色范围内的像素
cv::Mat mask;
cv::inRange(HSV, Lower, Upper, mask);
//寻找特定颜色的位置
std::vector<cv::Point> pixel;
cv::findNonZero(mask, pixel);
/*
//输出位置信息
for (int i = 0; i < pixel.size(); i++)
{
int x = pixel[i].x;
int y = pixel[i].y;
std::cout << "x:" << x << ";" << "y:" << y << std::endl;
}
*/
//保存提取的像素,方便查看是否提取成功
cv::Mat out;
RGBimage.copyTo(out,mask);
//cv::imshow("out", out);
cv::imwrite("mango.bmp", out);
//**********************************************按照内存位置提取点**********************************************
std::cout << "按照内存位置提取点..." << std::endl;
//开辟内存
float* cloud = (float*)malloc(point_data->pixel_height * point_data->pixel_width * 3 * sizeof(float));
memset((void*)cloud, 0, point_data->pixel_height* point_data->pixel_width * 3 * sizeof(float));
camera.Convert3DPointFromCharToFloat(*point_data, cloud);
/*
//输出,方便查看
for (int i = 0; i < pixel.size(); i++)
{
if (cloud[((pixel[i].y - 1) * width + pixel[i].x) * 3 + 2] != 0)
{
std::cout << cloud[((pixel[i].y - 1) * width + pixel[i].x) * 3 + 0] << std::endl;
std::cout << cloud[((pixel[i].y - 1) * width + pixel[i].x) * 3 + 1] << std::endl;
std::cout << cloud[((pixel[i].y - 1) * width + pixel[i].x) * 3 + 2] << std::endl;
}
}
*/
//保存到本地txt
std::cout << "将提取的点保存到txt..." << std::endl;
std::ofstream out_txt_file;
out_txt_file.open("EX_PointCloud.txt",std::ios::out | std::ios::trunc);
out_txt_file << std::fixed;//float
for (int i = 0; i < pixel.size(); i++)
{
if (cloud[((pixel[i].y - 1) * width + pixel[i].x) * 3 + 2] != 0)
{
out_txt_file << std::setprecision(2) << cloud[((pixel[i].y - 1) * width + pixel[i].x) * 3 + 0] << " "<< cloud[((pixel[i].y - 1) * width + pixel[i].x) * 3 + 1] << " " << cloud[((pixel[i].y - 1) * width + pixel[i].x) * 3 + 2] << std::endl;
}
}
//保存
out_txt_file.close();
//**********************************************结束工作***************************************
//释放内存
delete[] point_data->pixel;
delete point_data;
delete[] RGB_data->pixel;
delete RGB_data;
delete [] rgb_cloud;
delete[] RGB_EX->pixel;
delete[] RGB_EX;
RGBimage.release();
HSV.release();
mask.release();
out.release();
free(cloud);
//关闭数据流通道
int streamoff_point = camera.StreamOff(1, pointgigestream);
int streamoff_rgb = camera.StreamOff(2, rgbgigestream);
//断开相机连接
int disconnect = camera.CameraDisconnect();
std::cout << "工作结束!!!!!!" << std::endl;
}
cv::waitKey(0);
return 0;
}
运行结果
-
原始RGB数据
-
点云和RGB融合后再提取的RGB数据
-
根据颜色提取的目标像素
-
融合RGB后的点云
-
按像素提取的点
后记
- 知微传感Dkam系列3D相机可以应用于定位分拣、焊接引导、逆向建模、检测测量等领域
- 如有问题,欢迎与我深入交流:微信号:liu_zhisensor