OpenCV读入3D相机数据
写在前面
- 本人从事机器视觉细分的3D相机行业。编写此系列文章主要目的有:
- 1、便利他人应用3D相机,本系列文章包含公司所出售相机的SDK的使用例程及详细注释;
- 2、促进行业发展及交流。
- 知微传感Dkam系列3D相机可以应用于定位分拣、焊接引导、逆向建模、检测测量等领域
- 欢迎与我深入交流:微信号:liu_zhisensor
OpenCV读取数据说明
基本说明
- 知微传感的3D相机可以输出红外图(灰度图)和RGB图以便利2D+3D的应用场景
- 红外图和RGB图在相机端是以char类型存储并在上位机中以char类型接收的
- 深度图是从点云中提取的Z值,按照红外图的顺序存放的,在提取数据的过程中将char类型转为了short类型
相关API
- GetCloudPlaneZ 获取Z轴数据
- int GetCloudPlaneZ(Camera_Object_C* camera_obj, PhotoInfo *raw_data, short *imagedata)
- 函数功能:获取点云 Z 平面数据
- 头文件:dkam_zhicamera_api.h
- 参数:camera_obj:相机的结构体指针; raw_data:点云数据; imagedata:获取的 Z 平面数据
- 返回值:0:获取成功; 非 0:获取失败
例程及注释
- 本例程基于WIN10+VisualStudio2019+DkamSDK_1.6.71+OpenCV4.5.4,采用C++语言
- DkamSDK的配置方法请参考SDK说明书
- OpenCV的配置方法另请查阅
- 除自己安装配置OpenCV外,还可以利用知微传感提供的DkamSDK自带的OpenCV,在include和lib文件夹内
- 本例程在D132S型相机上验证
#include <iostream>
//DkamSDK
#include"dkam_discovery.h"
#include"dkam_gige_camera.h"
#include"dkam_gige_stream.h"
#include"dkam_zhicamera_api.h"
//OpenCV
#include <opencv2/opencv.hpp>
int main()
{
    std::cout << "Hello ZhiSENSOR!" << std::endl;
    std::cout << "Hello liu_sensor!" << std::endl;

    std::vector<DiscoveryInfo> discovery_info;
    Discovery discovery;
    GigeCamera camera;
    GigeStream* pointgigestream = NULL;
    GigeStream* graygigestream = NULL;
    GigeStream* rgbgigestream = NULL;

    //========================= Discover cameras =========================
    // Enumerate all 3D cameras on the local network and list their IPs.
    int camer_num = discovery.DiscoverCamera(&discovery_info);
    std::cout << "局域网内共有" << camer_num << "台相机" << std::endl;
    for (int i = 0; i < camer_num; i++)
    {
        std::cout << "局域网内相机的IP为:" << discovery.ConvertIpIntToString(discovery_info[i].camera_ip) << std::endl;
    }

    //========================= Select camera =========================
    // FIX: the original loop printed the "no camera with this IP" message once
    // per non-matching entry, never broke on a match, and — when nothing
    // matched — went on to index discovery_info[-1] (undefined behavior).
    // Break on the first match and exit early when no camera matched.
    int k = -1;
    for (int i = 0; i < camer_num; i++)
    {
        if (strcmp(discovery.ConvertIpIntToString(discovery_info[i].camera_ip), "192.168.30.35") == 0)
        {
            k = i;
            std::cout << "将连接第" << k + 1 << "台相机" << std::endl;
            break;
        }
    }
    if (k == -1)
    {
        std::cout << "局域网内无该IP的相机" << std::endl;
        return -1;
    }

    //========================= Connect =========================
    // On failure, print the same diagnostics the original emitted, then exit;
    // this flattens the original deeply-nested `if (connect == 0)` structure.
    int connect = camera.CameraConnect(&discovery_info[k]);
    if (connect == 0)
    {
        std::cout << "成功连接相机" << std::endl;
    }
    else
    {
        std::cout << "连接相机失败,请检查!!!" << std::endl;
        std::cout << "相机连接失败!!!" << std::endl;
        std::cout << "请排查原因,代号:" << connect << std::endl;
        cv::waitKey(0);
        return -1;
    }

    //========================= Query image sizes =========================
    // IR (gray) sensor resolution; channel index 0 selects the IR camera.
    int width = -1;
    int height = -1;
    std::cout << "获取相机红外图的宽和高。。。" << std::endl;
    camera.GetCameraHeight(&height, 0);
    camera.GetCameraWidth(&width, 0);
    std::cout << "相机红外图的宽为:" << width << std::endl;
    std::cout << "相机红外图的高为:" << height << std::endl;
    // RGB sensor resolution; channel index 1. Cameras without an RGB sensor
    // will not provide this.
    int width_RGB = -1;
    int height_RGB = -1;
    std::cout << "获取相机RGB图的宽和高。。。" << std::endl;
    camera.GetCameraHeight(&height_RGB, 1);
    camera.GetCameraWidth(&width_RGB, 1);
    std::cout << "相机RGB图的宽为" << width_RGB << std::endl;
    std::cout << "相机RGB图的高为" << height_RGB << std::endl;
    // FIX: if any of the queries above failed the sizes stay at -1, and the
    // buffer allocations below would have negative sizes — bail out instead.
    if (width <= 0 || height <= 0 || width_RGB <= 0 || height_RGB <= 0)
    {
        camera.CameraDisconnect();
        return -1;
    }

    //========================= Allocate receive buffers =========================
    // IR image: 1 byte per pixel (8-bit grayscale, see CV_8UC1 below).
    PhotoInfo* gray_data = new PhotoInfo;
    gray_data->pixel = new char[width * height];
    memset(gray_data->pixel, 0, width * height);
    // Point cloud: 6 bytes per pixel — presumably X/Y/Z as 16-bit values each;
    // TODO confirm the exact layout against the DkamSDK manual.
    PhotoInfo* point_data = new PhotoInfo;
    point_data->pixel = new char[width * height * 6];
    memset(point_data->pixel, 0, width * height * 6);
    // RGB image: 3 bytes per pixel (RGB888 after conversion, see note below).
    PhotoInfo* RGB_data = new PhotoInfo;
    RGB_data->pixel = new char[width_RGB * height_RGB * 3];
    memset(RGB_data->pixel, 0, width_RGB * height_RGB * 3);

    //========================= Open data channels =========================
    // Channel indices: 0 = IR, 1 = point cloud, 2 = RGB.
    int stream_gray = camera.StreamOn(0, &graygigestream);
    if (stream_gray == 0)
    {
        std::cout << "红外图通道打开成功!" << std::endl;
    }
    else
    {
        std::cout << "红外图通道打开失败!!!" << std::endl;
    }
    int stream_point = camera.StreamOn(1, &pointgigestream);
    if (stream_point == 0)
    {
        std::cout << "点云通道打开成功!" << std::endl;
    }
    else
    {
        std::cout << "点云通道打开失败!!!" << std::endl;
    }
    int stream_RGB = camera.StreamOn(2, &rgbgigestream);
    if (stream_RGB == 0)
    {
        std::cout << "RGB图通道打开成功!" << std::endl;
    }
    else
    {
        std::cout << "RGB图通道打开失败!!!" << std::endl;
    }

    // Start acquisition on the camera side.
    int acquistion = camera.AcquisitionStart();
    if (acquistion == 0)
    {
        std::cout << "可以开始接受数据!" << std::endl;
    }
    // Drop any stale frames sitting in the buffers.
    // FIX: guard against NULL stream pointers (StreamOn may have failed);
    // the original dereferenced them unconditionally.
    if (pointgigestream) pointgigestream->FlushBuffer();
    if (graygigestream) graygigestream->FlushBuffer();
    if (rgbgigestream) rgbgigestream->FlushBuffer();

    //========================= Wait for frames =========================
    // Timeouts are in microseconds (3000000 = 3 s). A NULL stream leaves the
    // capture code at -1, which is reported through the failure branch.
    int captureGray = -1;
    if (graygigestream)
    {
        captureGray = graygigestream->TimeoutCapture(gray_data, 3000000);
    }
    if (captureGray == 0)
    {
        std::cout << "红外接收成功!" << std::endl;
    }
    else
    {
        std::cout << "红外接收失败!!!" << std::endl;
        std::cout << "失败代号:" << captureGray << std::endl;
    }
    int capturePoint = -1;
    if (pointgigestream)
    {
        capturePoint = pointgigestream->TimeoutCapture(point_data, 3000000);
    }
    if (capturePoint == 0)
    {
        std::cout << "点云接收成功!" << std::endl;
    }
    else
    {
        std::cout << "点云接收失败!!!" << std::endl;
        std::cout << "失败代号:" << capturePoint << std::endl;
    }
    int captureRGB = -1;
    if (rgbgigestream)
    {
        captureRGB = rgbgigestream->TimeoutCapture(RGB_data, 3000000);
    }
    if (captureRGB == 0)
    {
        std::cout << "RGB接收成功!" << std::endl;
    }
    else
    {
        std::cout << "RGB接收失败!!!" << std::endl;
        std::cout << "失败代号:" << captureRGB << std::endl;
    }

    //========================= Save frames to disk =========================
    int savegray = camera.SaveToBMP(*gray_data, (char*)"Gray.bmp");
    if (savegray == 0)
    {
        std::cout << "红外图保存成功!" << std::endl;
    }
    else
    {
        std::cout << "红外图保存失败!!!" << std::endl;
    }
    int savergb = camera.SaveToBMP(*RGB_data, (char*)"RGB.bmp");
    if (savergb == 0)
    {
        std::cout << "RGB图保存成功!" << std::endl;
    }
    else
    {
        std::cout << "RGB图保存失败!!!" << std::endl;
    }
    int savedepth = camera.SaveDepthToPng(*point_data, (char*)"Depth.png");
    if (savedepth == 0)
    {
        std::cout << "深度图保存成功!" << std::endl;
    }
    else
    {
        std::cout << "深度图保存失败!!!" << std::endl;
    }

    //========================= Hand the data to OpenCV =========================
    // IR image: wrap the raw buffer as an 8-bit single-channel Mat (no copy).
    std::cout << "将红外图转换到OpenCV格式,并显示..." << std::endl;
    cv::Mat GrayImage = cv::Mat(height, width, CV_8UC1, (void*)gray_data->pixel);
    cv::imshow("Gray Picture", GrayImage);
    // RGB image: wrap as an 8-bit 3-channel Mat.
    // NOTE: the SaveToBMP call above already converted the raw RGB data to
    // RGB888 in place, so wrapping it directly works here. Without that save
    // step you must first call RawdataToRgb888(*RGB_data) to convert the raw
    // data before constructing the Mat.
    std::cout << "将RGB图转换到OpenCV格式,并显示..." << std::endl;
    cv::Mat RGBimage = cv::Mat(height_RGB, width_RGB, CV_8UC3, (void*)RGB_data->pixel);
    cv::imshow("RGB Picture", RGBimage);
    // Depth image: extract the Z plane from the point cloud as 16-bit values.
    // NOTE(review): the buffer is sized at twice the pixel count of the
    // CV_16U image below, mirroring the original example — presumably the SDK
    // needs the extra room; TODO confirm against the DkamSDK manual.
    std::cout << "提取深度数据,转换到OpenCV格式,并显示..." << std::endl;
    short* depthdata = (short*)malloc(width * height * 2 * sizeof(short));
    memset((void*)depthdata, 0, width * height * 2 * sizeof(short));
    int getZ = camera.GetCloudPlaneZ(*point_data, depthdata);
    if (getZ == 0)
    {
        std::cout << "提取点云Z值成功!" << std::endl;
    }
    else
    {
        std::cout << "提取点云Z值失败!!!" << std::endl;
    }
    cv::Mat DepthImage = cv::Mat(height, width, CV_16U, (void*)depthdata);
    cv::imshow("Depth Picture", DepthImage);
    // Depth pixel values are in device units; multiply by the per-model unit
    // below to get physical distance.
    std::cout << "注意:深度图像素值乘以最小单位才是真实物理世界的距离。" << std::endl;
    std::cout << "各型号最小单位如下:" << std::endl;
    std::cout << "D132S: 0.05mm" << std::endl;
    std::cout << "D300: 1/60mm" << std::endl;
    std::cout << "D330XS:0.02mm" << std::endl;
    std::cout << "D330S: 0.02mm" << std::endl;
    std::cout << "D330M: 0.05mm" << std::endl;
    std::cout << "D330L: 0.05mm" << std::endl;

    //========================= Cleanup =========================
    // The Mats above wrap these buffers without owning them; imshow has
    // already rendered the windows, so releasing the buffers here matches the
    // original example's behavior.
    delete[] point_data->pixel;
    delete point_data;
    delete[] gray_data->pixel;
    delete gray_data;
    delete[] RGB_data->pixel;
    delete RGB_data;
    // FIX: depthdata was allocated with malloc(), so it must be released with
    // free(); the original used `delete`, which is undefined behavior.
    free(depthdata);
    // Close the data channels and disconnect.
    camera.StreamOff(0, graygigestream);
    camera.StreamOff(1, pointgigestream);
    camera.StreamOff(2, rgbgigestream);
    camera.CameraDisconnect();
    std::cout << "工作结束!!!!!!" << std::endl;
    // Keep the OpenCV windows open until a key is pressed.
    cv::waitKey(0);
    return 0;
}
运行结果
结果对比
-
使用知微传感自带的保存API保存的图片和OpenCV显示结果对比(仅示意了红外图)
-
可以看出用OpenCV显示的图片和用官方提供的API保存的数据相同,以上代码可行
后记
- 知微传感Dkam系列3D相机可以应用于定位分拣、焊接引导、逆向建模、检测测量等领域
- 如有问题,欢迎与我深入交流:微信号:liu_zhisensor