由于许多博客并没有真正解决 pythonnet 指针传图的问题,本文是我在学习之后加入自己深入理解的总结记录,方便以后查看。
不借助外部库,实现从 C# 通过指针传图给 Python,以及从 Python 传图回 C#。
1、C# 通过指针传输图片和接收图片
/// <summary>
/// Runs the Python vision project on three 8-bit grey channel images
/// (blue, green, red), passing the pixel buffers to Python by raw pointer,
/// and converts the returned 24bpp buffer back into a VisionPro image.
/// Returns null when any exception is caught (the error is logged).
/// </summary>
public static ICogImage visiontool_predict(ICogImage image1, ICogImage image2, ICogImage image3)
{
    try
    {
        using (Py.GIL())
        {
            #region Pointer-based image transfer (method 1)
            CogImage8Grey bimage = (CogImage8Grey)image1;
            CogImage8Grey gimage = (CogImage8Grey)image2;
            CogImage8Grey rimage = (CogImage8Grey)image3;
            // NOTE(review): the green/red buffers are read using bimage's
            // width/height — this assumes all three planes share the same
            // dimensions; confirm with the callers.
            var blue = bimage.Get8GreyPixelMemory(Cognex.VisionPro.CogImageDataModeConstants.Read, 0, 0, bimage.Width, bimage.Height).Scan0;
            var green = gimage.Get8GreyPixelMemory(Cognex.VisionPro.CogImageDataModeConstants.Read, 0, 0, bimage.Width, bimage.Height).Scan0;
            var red = rimage.Get8GreyPixelMemory(Cognex.VisionPro.CogImageDataModeConstants.Read, 0, 0, bimage.Width, bimage.Height).Scan0;
            // Caller-allocated output buffer: 3 bytes per pixel (24bpp result).
            IntPtr outputPtr = Marshal.AllocHGlobal((int)(sizeof(byte) * bimage.Height * bimage.Width * 3));
            try
            {
                unsafe
                {
                    long longArr_R = (long)(byte*)red;
                    long longArr_G = (long)(byte*)green;
                    long longArr_B = (long)(byte*)blue;
                    // FIX: the Python signature is (..., width, height); the original
                    // code passed (Height, Width), which makes the Python-side
                    // reshape(height, width) transpose the image for non-square inputs.
                    xbbtest.run_project(project, longArr_R, longArr_G, longArr_B, outputPtr.ToInt64(), (int)bimage.Width, (int)bimage.Height);
                    // Wrap the raw result buffer; stride is Width*3 bytes per row.
                    Bitmap bitmap3 = new Bitmap(bimage.Width, bimage.Height, bimage.Width * 3, PixelFormat.Format24bppRgb, outputPtr);
                    // NOTE(review): this assumes CogImage24PlanarColor copies the
                    // pixel data out of the bitmap, since outputPtr is freed below
                    // before the image is used by the caller — confirm.
                    ICogImage cogImage = new CogImage24PlanarColor(bitmap3);
                    //cogImage = cogImage.ScaleImage(4022, 1700); // resize the image
                    return cogImage;
                }
            }
            finally
            {
                // FIX: always release the unmanaged buffer, even when run_project
                // or the Bitmap construction throws; the original only freed it on
                // the success path and leaked it on exceptions.
                Marshal.FreeHGlobal(outputPtr);
            }
            #endregion
        }
    }
    catch (Exception ex)
    {
        LogHelper.Error("yolact推理时捕捉到异常!", ex);
    }
    return null;
}
2、Python 通过指针接收图片和传输图片(cvMat 格式)
# Inference entry point — called from the external C# host via pythonnet.
def run_project(project, in_ptr_r, in_ptr_g, in_ptr_b, out_ptr, width, height):
    """Run the vision project on three grey-channel images passed by pointer.

    Args:
        project: vision project object exposing the getTask/getBlock/getTool API.
        in_ptr_r, in_ptr_g, in_ptr_b: integer addresses of the R/G/B 8-bit grey
            pixel buffers, each ``width * height`` bytes, row-major.
        out_ptr: integer address of a caller-allocated buffer of
            ``width * height * 3`` bytes that receives the merged 24bpp result;
            a NULL (0) address skips the copy.
        width, height: image dimensions in pixels.
            NOTE(review): the reshape below treats each buffer as ``height``
            rows x ``width`` columns — confirm the caller passes the arguments
            in that order.
    """
    def _gray_from_ptr(ptr):
        # Wrap the foreign buffer as a (height, width) uint8 array without copying.
        arr_type = ctypes.POINTER(ctypes.c_uint8 * (height * width))
        buf = ctypes.cast(ptr, arr_type).contents
        return np.frombuffer(buf, dtype=np.uint8).reshape(height, width)

    img_r = _gray_from_ptr(in_ptr_r)
    img_g = _gray_from_ptr(in_ptr_g)
    img_b = _gray_from_ptr(in_ptr_b)

    # Feed the three channels into the merge tool and run the block.
    project.getTask("task1").getBlock("Vision__2").getTool("ImgThreshMergeTool_1").input_bchannel_image = GrayImage(img_b)
    project.getTask("task1").getBlock("Vision__2").getTool("ImgThreshMergeTool_1").input_gchannel_image = GrayImage(img_g)
    project.getTask("task1").getBlock("Vision__2").getTool("ImgThreshMergeTool_1").input_rchannel_image = GrayImage(img_r)
    project.getTask("task1").getBlock("Vision__2").run()
    image_result = project.getTask("task1").getBlock("Vision__2").getTool(
        "ImgThreshMergeTool_1").result.output_merge_image

    # FIX: the original tested `out_ptr is not None`, which is never false for an
    # integer address coming from C#; a NULL (0) pointer would still be written
    # to and crash. Truthiness handles both None and 0.
    if out_ptr:
        # FIX: memmove reads raw memory, so the result must be C-contiguous;
        # ascontiguousarray is a no-op when it already is, and otherwise makes
        # a contiguous copy instead of copying garbage strides.
        out_arr = np.ascontiguousarray(image_result)
        ctypes.memmove(out_ptr, out_arr.ctypes.data, height * width * 3)