Preface:
The image acquisition hardware used in this article is a 3D profile sensor, also called a 3D line laser or 3D camera. We will not go into its imaging principle (laser triangulation) here; interested readers can look it up online. Instead, we focus on the HALCON operators: how to obtain the depth image and the intensity image, and how to convert the depth image into a point cloud and display it.
1) After connecting an external 3D camera, we normally configure the frame/line trigger mode and the image line count (height) to make sure each product is scanned completely. During acquisition, whether we poll for images or receive them through a callback, the event that returns the image data hands us the raw frame (a pointer, array, or instance object, plus image information such as pixel format and line count). Passing this data to the HALCON operator GenImage1 completes the conversion and yields an image represented by the HALCON data object HObject. The code below demonstrates this concretely.
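For reference, the signature of this key operator (gen_image1 copies the pixel data from the supplied pointer into a new HALCON image, so the source buffer may be released afterwards):
gen_image1( : Image : Type, Width, Height, PixelPointer : )
Type: pixel type, e.g. 'byte' for the 8-bit intensity data or 'int2' for the 16-bit depth data;
Width: image width in pixels;
Height: image height in pixels;
PixelPointer: pointer to the first pixel of the input buffer;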
Image polling thread:
/// <summary>
/// Polling-based image acquisition loop
/// </summary>
private void ReceiveThreadProcess()
{
    int nRet = (int)Mv3dLpSDK.MV3D_LP_OK;
    UInt32 nTimeOut = 50; // ms to wait for a frame
    while (m_bGrabbing)
    {
        Thread.Sleep(1);
        MV3D_LP_IMAGE_DATA pstImage = new MV3D_LP_IMAGE_DATA();
        nRet = Mv3dLpSDK.MV3D_LP_GetImage(m_DevHandle, pstImage, nTimeOut);
        if (0 == nRet)
        {
            try
            {
                // Cache the raw frame so the other images can be derived from it later
                nRet = DisplayImage(pstImage);
                // Signal that the raw frame has been cached
                GrabFlagEvent?.Invoke((int)Mv3dLpSDK.MV3D_LP_OK == nRet);
                if ((int)Mv3dLpSDK.MV3D_LP_OK != nRet)
                {
                    throw new ArgumentException(nRet.ToString());
                }
            }
            catch
            {
                Console.WriteLine("ERROR !\r\n");
            }
        }
        // On timeout or failure, simply poll again
    }
}
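As noted above, the SDK also supports callback-driven acquisition as an alternative to polling. The sketch below is hypothetical: the registration function is assumed here to be named MV3D_LP_RegisterImageDataCallBack, and the exact name and delegate signature must be verified against your Mv3dLpSDK version:

// Hypothetical callback; it reuses the same caching path as the polling loop
private void ImageDataCallback(MV3D_LP_IMAGE_DATA pstImage, IntPtr pUser)
{
    DisplayImage(pstImage);
}
// Registered once after opening the device, instead of starting the polling thread:
// Mv3dLpSDK.MV3D_LP_RegisterImageDataCallBack(m_DevHandle, ImageDataCallback, IntPtr.Zero);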
Caching the image data:
/// <summary>
/// Cache the raw image data (method name kept from the SDK sample; display happens elsewhere)
/// </summary>
/// <param name="pstImage">frame returned by MV3D_LP_GetImage</param>
/// <returns>error code</returns>
private int DisplayImage(MV3D_LP_IMAGE_DATA pstImage)
{
    int nRet = (int)Mv3dLpSDK.MV3D_LP_OK;
    Monitor.Enter(Lock);
    // Copy the frame header: geometry, format, frame number, scales and offsets
    m_stImageInfo.nWidth = pstImage.nWidth;
    m_stImageInfo.nHeight = pstImage.nHeight;
    m_stImageInfo.nDataLen = pstImage.nDataLen;
    m_stImageInfo.nIntensityDataLen = pstImage.nIntensityDataLen;
    m_stImageInfo.enImageType = pstImage.enImageType;
    m_stImageInfo.nFrameNum = pstImage.nFrameNum;
    m_stImageInfo.fXScale = pstImage.fXScale;
    m_stImageInfo.fYScale = pstImage.fYScale;
    m_stImageInfo.fZScale = pstImage.fZScale;
    m_stImageInfo.nXOffset = pstImage.nXOffset;
    m_stImageInfo.nYOffset = pstImage.nYOffset;
    m_stImageInfo.nZOffset = pstImage.nZOffset;
    // Grow the buffers only when the incoming frame is larger than anything seen so far
    if (m_nMaxImageSize < pstImage.nDataLen)
    {
        m_pcDataBuf = new byte[pstImage.nDataLen];
        m_nMaxImageSize = pstImage.nDataLen;
    }
    if (m_nMaxImageSize2 < pstImage.nIntensityDataLen)
    {
        m_pcDataBuf2 = new byte[pstImage.nIntensityDataLen];
        m_nMaxImageSize2 = pstImage.nIntensityDataLen;
    }
    // Note: UnsafeAddrOfPinnedArrayElement assumes the arrays do not move;
    // the getter methods below pin them explicitly before handing them to HALCON
    m_stImageInfo.pData = Marshal.UnsafeAddrOfPinnedArrayElement(m_pcDataBuf, 0);
    m_stImageInfo.pIntensityData = Marshal.UnsafeAddrOfPinnedArrayElement(m_pcDataBuf2, 0);
    // Deep-copy the depth and intensity payloads out of the SDK buffer
    Marshal.Copy(pstImage.pData, m_pcDataBuf, 0, (int)pstImage.nDataLen);
    Marshal.Copy(pstImage.pIntensityData, m_pcDataBuf2, 0, (int)pstImage.nIntensityDataLen);
    Monitor.Exit(Lock);
    // Display is not done here
    //nRet = Mv3dLpSDK.MV3D_LP_DisplayImage(pstImage, m_hWnd, Mv3dLpSDK.DisplayType_Auto, 0, 0);
    return nRet;
}
Generating the depth-image HObject from the raw data:
/// <summary>
/// Get the depth image (16-bit, suitable for saving as .tiff)
/// </summary>
/// <returns>depth image as an HObject</returns>
public HObject GetDepthImage()
{
    HObject ho_Image = null;
    Monitor.Enter(Lock);
    // Pin the buffer so its address stays valid while HALCON reads it
    GCHandle hBuf = GCHandle.Alloc(m_pcDataBuf, GCHandleType.Pinned);
    IntPtr ptr = hBuf.AddrOfPinnedObject();
    // GenImage1 copies the pixel data, so the buffer can be unpinned afterwards
    HOperatorSet.GenImage1(out ho_Image, "int2", m_stImageInfo.nWidth, m_stImageInfo.nHeight, ptr.ToInt64());
    Monitor.Exit(Lock);
    if (hBuf.IsAllocated)
    {
        hBuf.Free();
    }
    //if (null != ho_Image)
    //{
    //    // Save the depth image | Save Depth
    //    string strtiffName = "./Halcon_Image_";
    //    strtiffName += m_stImageInfo.nFrameNum;
    //    strtiffName += ".tiff";
    //    HOperatorSet.WriteImage(ho_Image, "tiff", 0, strtiffName);
    //}
    return ho_Image;
}
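A minimal usage sketch for displaying the result (hWindowControl1 is a hypothetical HWindowControl placed on a WinForms form; any HALCON window handle works):

HObject depth = GetDepthImage();
if (depth != null)
{
    HTuple winHandle = hWindowControl1.HalconWindow;
    // Fit the view to the full image
    HOperatorSet.SetPart(winHandle, 0, 0, (int)(m_stImageInfo.nHeight - 1), (int)(m_stImageInfo.nWidth - 1));
    HOperatorSet.DispObj(depth, winHandle);
    depth.Dispose();
}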
The resulting depth image is shown below:
2) Using a similar approach, we can obtain the intensity image:
/// <summary>
/// Get the intensity image (8-bit, suitable for saving as .bmp)
/// </summary>
/// <returns>intensity image as an HObject</returns>
public HObject GetIntensityImage()
{
    HObject ho_Image = null;
    Monitor.Enter(Lock);
    GCHandle hBuf = GCHandle.Alloc(m_pcDataBuf2, GCHandleType.Pinned);
    IntPtr ptr = hBuf.AddrOfPinnedObject();
    HOperatorSet.GenImage1(out ho_Image, "byte", m_stImageInfo.nWidth, m_stImageInfo.nHeight, ptr.ToInt64());
    Monitor.Exit(Lock);
    if (hBuf.IsAllocated)
    {
        hBuf.Free();
    }
    //if (null != ho_Image)
    //{
    //    // Save the intensity image | Save Intensity
    //    string strBmpName = "./Halcon_Image_";
    //    strBmpName += m_stImageInfo.nFrameNum;
    //    strBmpName += ".bmp";
    //    HOperatorSet.WriteImage(ho_Image, "bmp", 0, strBmpName);
    //}
    return ho_Image;
}
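The only differences from GetDepthImage are the source buffer (m_pcDataBuf2) and the pixel type passed to GenImage1: "byte" (8-bit) instead of "int2" (16-bit). That is also why the commented-out save path uses .bmp here, while the 16-bit depth image needs a format that preserves its bit depth, such as .tiff.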
3) The point cloud itself can be fetched directly through the corresponding interface of the client SDK, which we will not cover here. Instead, we focus on how to generate the point cloud from the raw data with HALCON operators and obtain the point cloud handle (HTuple):
/// <summary>
/// Get the point cloud (3D object model handle)
/// </summary>
/// <returns>handle of the 3D object model</returns>
public HTuple GetObjectModel3D()
{
    MV3D_LP_IMAGE_DATA stImageInfoTmp = new MV3D_LP_IMAGE_DATA();
    Monitor.Enter(Lock);
    GCHandle hBuf = GCHandle.Alloc(m_pcDataBuf, GCHandleType.Pinned);
    IntPtr ptr = hBuf.AddrOfPinnedObject();
    // Rebuild the frame header around the cached depth buffer
    stImageInfoTmp.enImageType = m_stImageInfo.enImageType;
    stImageInfoTmp.nFrameNum = m_stImageInfo.nFrameNum;
    stImageInfoTmp.nWidth = m_stImageInfo.nWidth;
    stImageInfoTmp.nHeight = m_stImageInfo.nHeight;
    stImageInfoTmp.nDataLen = m_stImageInfo.nDataLen;
    stImageInfoTmp.pData = ptr;
    stImageInfoTmp.fXScale = m_stImageInfo.fXScale;
    stImageInfoTmp.fYScale = m_stImageInfo.fYScale;
    stImageInfoTmp.fZScale = m_stImageInfo.fZScale;
    stImageInfoTmp.nXOffset = m_stImageInfo.nXOffset;
    stImageInfoTmp.nYOffset = m_stImageInfo.nYOffset;
    stImageInfoTmp.nZOffset = m_stImageInfo.nZOffset;
    MV3D_LP_IMAGE_DATA plyData = new MV3D_LP_IMAGE_DATA();
    // First map the depth image to a point cloud through the SDK interface
    Mv3dLpSDK.MV3D_LP_MapDepthToPointCloud(stImageInfoTmp, plyData);
    // Then convert the point cloud data into an object handle usable by HALCON.
    // The buffer holds interleaved x/y/z floats, one triple per point.
    float[] ConvertData = new float[plyData.nDataLen / 4];
    Marshal.Copy(plyData.pData, ConvertData, 0, (int)plyData.nDataLen / 4);
    int nPointNum = (int)(plyData.nWidth * plyData.nHeight);
    double[] x = new double[nPointNum];
    double[] y = new double[nPointNum];
    double[] z = new double[nPointNum];
    for (int j = 0; j < nPointNum; j++)
    {
        x[j] = ConvertData[j * 3];
        y[j] = ConvertData[j * 3 + 1];
        z[j] = ConvertData[j * 3 + 2];
    }
    // Building the tuples from whole arrays is far faster than per-element HTuple assignment
    HTuple hv_X = new HTuple(x);
    HTuple hv_Y = new HTuple(y);
    HTuple hv_Z = new HTuple(z);
    HTuple hObjectModel3D = null;
    HOperatorSet.GenObjectModel3dFromPoints(hv_X, hv_Y, hv_Z, out hObjectModel3D);
    Monitor.Exit(Lock);
    if (hBuf.IsAllocated)
    {
        hBuf.Free();
    }
    //if (null != hObjectModel3D)
    //{
    //    // Save the point cloud | Save PointCloud
    //    string strPlyName = "./Halcon_Image_";
    //    strPlyName += plyData.nFrameNum;
    //    strPlyName += ".ply";
    //    HOperatorSet.WriteObjectModel3d(hObjectModel3D, "ply", strPlyName, "invert_normals", "false");
    //}
    return hObjectModel3D;
}
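One caveat: the returned HTuple is only a handle; the point data lives inside HALCON and must be released explicitly once the model is no longer needed:

HOperatorSet.ClearObjectModel3d(hObjectModel3D);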
The X/Y/Z scale and offset values used in the code can all be found in the hardware documentation. The formula that maps pixel coordinates to physical coordinates differs slightly from one vendor to another, so don't worry about the exact form; a typical mapping is sketched below.
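For orientation only, a hypothetical sketch of how the scales and offsets usually enter the conversion (col/row are pixel indices, gray is the 16-bit depth value; the exact formula and units must be taken from your sensor manual):

// Hypothetical mapping; units and exact form are vendor specific
double x = col * fXScale + nXOffset;   // lateral position
double y = row * fYScale + nYOffset;   // scan direction
double z = gray * fZScale + nZOffset;  // height

The conversion itself relies on one key HALCON operator, gen_object_model_3d_from_points: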
gen_object_model_3d_from_points( : : X, Y, Z : ObjectModel3D)
Description: creates a 3D object model that represents a point cloud. The points are described by their x, y and z coordinates.
Parameters:
X: input; x coordinates of the points of the 3D point cloud;
Y: input; y coordinates of the points of the 3D point cloud;
Z: input; z coordinates of the points of the 3D point cloud;
ObjectModel3D: output; handle of the 3D object model;
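A minimal self-contained illustration with three hard-coded points:

HTuple x = new HTuple(new double[] { 0.0, 1.0, 2.0 });
HTuple y = new HTuple(new double[] { 0.0, 0.0, 1.0 });
HTuple z = new HTuple(new double[] { 0.5, 0.5, 0.5 });
HTuple model;
HOperatorSet.GenObjectModel3dFromPoints(x, y, z, out model);
HTuple numPoints;
HOperatorSet.GetObjectModel3dParams(model, "num_points", out numPoints); // yields 3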
4) The above showed how to convert the raw camera data into a 3D object model handle (HTuple). Alternatively, we can fetch only the depth image from the camera and then convert that depth image into a point cloud for processing and display. The conversion method is shown below.
/// <summary>
/// Convert a depth image into a point cloud
/// </summary>
/// <param name="ho_ImageH">input depth image</param>
/// <param name="hv_XScale">X scale of the point-cloud coordinates</param>
/// <param name="hv_XOffset">X offset of the point-cloud coordinates</param>
/// <param name="hv_YScale">Y scale of the point-cloud coordinates</param>
/// <param name="hv_YOffset">Y offset of the point-cloud coordinates</param>
/// <param name="hv_ZScale">Z scale of the point-cloud coordinates</param>
/// <param name="hv_ZOffset">Z offset of the point-cloud coordinates</param>
/// <returns>handle of the 3D object model</returns>
public static HTuple DepthImg2PointCloud(HObject ho_ImageH,
    double hv_XScale, double hv_XOffset,
    double hv_YScale, double hv_YOffset,
    double hv_ZScale, double hv_ZOffset,
    out HObject ho_ImageX, out HObject ho_ImageY, out HObject ho_ImageZ)
{
    // Iconic variables
    HObject ho_Domain, ho_ImageSurface;
    HObject ho_ImageSurface1, ho_Region1;
    HObject ho_Region2, ho_RegionDifference, ho_ImageReduced;
    HObject ho_ImageHReal;
    // Control variables
    HTuple hv_Width = new HTuple();
    HTuple hv_Height = new HTuple(), hv_Min = new HTuple();
    HTuple hv_Max = new HTuple(), hv_Range = new HTuple();
    HTuple hv_ObjectModel3D = new HTuple();
    // Initialization
    HOperatorSet.GenEmptyObj(out ho_Domain);
    HOperatorSet.GenEmptyObj(out ho_ImageSurface);
    HOperatorSet.GenEmptyObj(out ho_ImageSurface1);
    HOperatorSet.GenEmptyObj(out ho_ImageX);
    HOperatorSet.GenEmptyObj(out ho_ImageY);
    HOperatorSet.GenEmptyObj(out ho_Region1);
    HOperatorSet.GenEmptyObj(out ho_Region2);
    HOperatorSet.GenEmptyObj(out ho_RegionDifference);
    HOperatorSet.GenEmptyObj(out ho_ImageReduced);
    HOperatorSet.GenEmptyObj(out ho_ImageHReal);
    HOperatorSet.GenEmptyObj(out ho_ImageZ);
    // * Unit: mm — example calibration values:
    //hv_ZScale = 0.001;
    //using (HDevDisposeHelper dh = new HDevDisposeHelper())
    //{
    //    hv_ZOffset = -10.0 / 1000;
    //}
    //hv_XScale.Dispose();
    //hv_XScale = 0.020;
    //hv_XOffset.Dispose();
    //using (HDevDisposeHelper dh = new HDevDisposeHelper())
    //{
    //    hv_XOffset = -7051.0 / 1000;
    //}
    //hv_YScale.Dispose();
    //hv_YScale = -0.020;
    //hv_YOffset.Dispose();
    //hv_YOffset = 0.0;
    // Build the x/y/z single-channel images of the depth map
    hv_Width.Dispose(); hv_Height.Dispose();
    HOperatorSet.GetImageSize(ho_ImageH, out hv_Width, out hv_Height);
    ho_Domain.Dispose();
    HOperatorSet.GetDomain(ho_ImageH, out ho_Domain);
    // Generate coordinate-ramp images; multiplied by the resolution they give the relative x/y values
    // yImage: gray value = row index
    ho_ImageSurface.Dispose();
    HOperatorSet.GenImageSurfaceFirstOrder(out ho_ImageSurface, "real", 1.0, 0.0,
        0.0, 0.0, 0.0, hv_Width, hv_Height);
    // xImage: gray value = column index
    ho_ImageSurface1.Dispose();
    HOperatorSet.GenImageSurfaceFirstOrder(out ho_ImageSurface1, "real", 0.0, 1.0,
        0.0, 0.0, 0.0, hv_Width, hv_Height);
    ho_ImageX.Dispose();
    HOperatorSet.ScaleImage(ho_ImageSurface1, out ho_ImageX, hv_XScale, hv_XOffset);
    ho_ImageY.Dispose();
    HOperatorSet.ScaleImage(ho_ImageSurface, out ho_ImageY, hv_YScale, hv_YOffset);
    // Use scale_image to convert gray values to real heights; first get the min/max gray values
    ho_Domain.Dispose();
    HOperatorSet.GetDomain(ho_ImageH, out ho_Domain);
    hv_Min.Dispose(); hv_Max.Dispose(); hv_Range.Dispose();
    HOperatorSet.MinMaxGray(ho_Domain, ho_ImageH, 0, out hv_Min, out hv_Max, out hv_Range);
    ho_Region1.Dispose();
    HOperatorSet.Threshold(ho_ImageH, out ho_Region1, hv_Min, hv_Max);
    using (HDevDisposeHelper dh = new HDevDisposeHelper())
    {
        // Pixels at (or just above) the minimum are invalid/background
        ho_Region2.Dispose();
        HOperatorSet.Threshold(ho_ImageH, out ho_Region2, hv_Min, hv_Min + 2);
    }
    ho_RegionDifference.Dispose();
    HOperatorSet.Difference(ho_Region1, ho_Region2, out ho_RegionDifference);
    ho_ImageReduced.Dispose();
    HOperatorSet.ReduceDomain(ho_ImageH, ho_RegionDifference, out ho_ImageReduced);
    ho_ImageHReal.Dispose();
    HOperatorSet.ConvertImageType(ho_ImageReduced, out ho_ImageHReal, "real");
    ho_ImageZ.Dispose();
    HOperatorSet.ScaleImage(ho_ImageHReal, out ho_ImageZ, hv_ZScale, hv_ZOffset);
    //ho_MultiChannelImage.Dispose();
    //HOperatorSet.Compose3(ho_ImageX, ho_ImageY, ho_ImageZ, out ho_MultiChannelImage);
    hv_ObjectModel3D.Dispose();
    HOperatorSet.XyzToObjectModel3d(ho_ImageX, ho_ImageY, ho_ImageZ, out hv_ObjectModel3D);
    // Save the point cloud
    //HOperatorSet.WriteObjectModel3d(hv_ObjectModel3D, "ply", "transply.ply",
    //    new HTuple(), new HTuple());
    // Clean up; ho_ImageX/Y/Z are out parameters and stay alive for the caller
    ho_Domain.Dispose();
    ho_ImageSurface.Dispose();
    ho_ImageSurface1.Dispose();
    ho_Region1.Dispose();
    ho_Region2.Dispose();
    ho_RegionDifference.Dispose();
    ho_ImageReduced.Dispose();
    ho_ImageHReal.Dispose();
    return hv_ObjectModel3D;
}
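A usage sketch tying the method to the cached frame header. It assumes the scale/offset fields stored in m_stImageInfo are already in the units expected here; verify this against your hardware documentation:

HObject imgX, imgY, imgZ;
HObject depth = GetDepthImage();
HTuple model = DepthImg2PointCloud(depth,
    m_stImageInfo.fXScale, m_stImageInfo.nXOffset,
    m_stImageInfo.fYScale, m_stImageInfo.nYOffset,
    m_stImageInfo.fZScale, m_stImageInfo.nZOffset,
    out imgX, out imgY, out imgZ);
// ... work with the model, then release everything
HOperatorSet.ClearObjectModel3d(model);
imgX.Dispose(); imgY.Dispose(); imgZ.Dispose(); depth.Dispose();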
The DepthImg2PointCloud code is annotated in detail, so no further walkthrough is needed. The final conversion, however, uses one key operator, xyz_to_object_model_3d:
xyz_to_object_model_3d(X, Y, Z : : : ObjectModel3D)
Converts 3D points from images into a 3D object model.
Inputs:
X (input_object), single-channel image → object (real): image with the x coordinates of the 3D points; its domain (region of interest) selects which points are used.
Y (input_object), single-channel image → object (real): image with the y coordinates of the 3D points.
Z (input_object), single-channel image → object (real): image with the z coordinates of the 3D points.
Output:
ObjectModel3D (output_control): handle of the 3D object model.
5) After converting with the method above, let's display the point cloud and take a look.
Viewing the point cloud relies on an important operator, visualize_object_model_3d:
visualize_object_model_3d( : : WindowHandle, ObjectModel3D, CamParam, PoseIn, GenParamName, GenParamValue, Title, Label, Information : PoseOut)
WindowHandle: handle of the display window
ObjectModel3D: the 3D model(s) to display
CamParam: internal parameters of a hypothetical area-scan camera observing the model
PoseIn: the 3D pose of the model
GenParamName: names of generic display parameters
GenParamValue: values of the generic display parameters
Title: text shown in the upper-left corner of the window
Label: text shown at the position of each 3D model
Information: information shown in the lower-left corner of the window
PoseOut: the 3D pose output after the user has interactively adjusted the model's pose
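A minimal display sketch in C# (winHandle is a hypothetical HALCON window handle; empty tuples for CamParam and PoseIn let the operator pick a default view, and 'color_attrib' = 'coord_z' colors the points by height). Note that the call blocks while the user rotates and zooms the model, and returns the adjusted pose when the interaction ends:

HTuple poseOut;
HOperatorSet.VisualizeObjectModel3d(winHandle, hObjectModel3D,
    new HTuple(), new HTuple(),
    new HTuple("color_attrib"), new HTuple("coord_z"),
    "point cloud", new HTuple(), new HTuple(), out poseOut);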
6) Appendix: let's see how the converted point cloud can be viewed and manipulated.
Point cloud viewer
This article covered where the images come from, how to convert between them, and how to generate objects that HALCON can operate on. In the following articles we will walk through the image display controls and the point-cloud operators one by one. Stay tuned!