In both the RealSense SDK and the RealSense Viewer, saving a point cloud as a PLY file with RAW RGB appears to be wrong: the RAW16-format Bayer image is treated as if it were a Y16 grayscale image. That is incorrect, because the pixels of a Bayer-pattern image hold interleaved R, G, and B samples rather than luminance values, so the full color must be recovered by demosaicing; treating the mosaic as grayscale scrambles the colors of the saved point cloud.
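As a quick illustration of the difference (a minimal sketch, not from the issue; the file name and resolution are hypothetical, while the GBRG pattern and 16-to-8-bit scaling match the code below):

```python
import numpy as np
import cv2

# Hypothetical RAW16 dump: a (1080, 1920) uint16 Bayer mosaic.
raw16 = np.fromfile("frame.raw", dtype=np.uint16).reshape(1080, 1920)
raw8 = np.uint8(raw16 / 256.0)  # scale 16-bit samples down to 8 bits

# Wrong: treating the mosaic as Y16/grayscale keeps the interleaved
# R/G/B samples as-is, so the texture colors come out scrambled.
wrong = cv2.cvtColor(raw8, cv2.COLOR_GRAY2RGB)

# Right: demosaic the Bayer pattern to reconstruct a true RGB image.
right = cv2.cvtColor(raw8, cv2.COLOR_BAYER_GBRG2RGB)
```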
Ground truth RAW point cloud
RealSense SDK & Viewer saving example:
Python code:

```python
# Imports (commented out in the original post); `pipeline`, `pc`, `decimate`,
# and `state` are assumed to be set up as in the SDK's point-cloud example.
import numpy as np
import cv2
import pyrealsense2 as rs

rawframes = pipeline.wait_for_frames()
depth_frame = rawframes.get_depth_frame()
raw_frame = rawframes.get_color_frame()
imgRAW = np.asanyarray(raw_frame.get_data())

if state.color:
    imgRAW = np.uint8(imgRAW / 256.0)                        # RAW16 -> 8 bit
    imgRGB = cv2.cvtColor(imgRAW, cv2.COLOR_BAYER_GBRG2RGB)  # demosaic the Bayer pattern
    mapped_frame, color_source = raw_frame, imgRGB

points = pc.calculate(depth_frame)
pc.map_to(mapped_frame)
depth_frame = decimate.process(depth_frame)

v, t = points.get_vertices(), points.get_texture_coordinates()
verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)      # xyz (3 cols)
texcoords = np.asanyarray(t).view(np.float32).reshape(-1, 2)  # uv (2 cols)

key = cv2.waitKey(1)
if key == ord("x"):
    color_intrinsic = mapped_frame.profile.as_video_stream_profile().intrinsics
    rgb = np.zeros((verts.shape[0], 3))
    state.paused = True
    for i in range(verts.shape[0]):
        # Map normalized uv to pixel coordinates and clamp to the image
        # bounds (the depth FOV is wider than the RGB FOV, see note below).
        x = int(texcoords[i][0] * color_intrinsic.width)   # e.g. 1920
        y = int(texcoords[i][1] * color_intrinsic.height)  # e.g. 1080
        x = max(0, min(x, color_intrinsic.width - 1))
        y = max(0, min(y, color_intrinsic.height - 1))
        rgb[i, :] = color_source[y, x, :]
    rgb = rgb.astype(np.uint8)  # the original `rgb.astype(int)` discarded its result

    vertex_dtype = [('x', 'f'), ('y', 'f'), ('z', 'f'),
                    ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
    points = np.zeros(verts.shape[0], dtype=vertex_dtype)
    for i in range(verts.shape[0]):
        x, y, z = verts[i, 0], verts[i, 1], verts[i, 2]
        r, g, b = rgb[i, 2], rgb[i, 1], rgb[i, 0]  # channel order as in the original post
        points[i] = (x, y, z, r, g, b)
    print(points.shape)

    outfile = "./output/MyRAWout.ply"
    write_ply(outfile, points)  # user-defined helper, not shown here
```
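`write_ply` is the author's own helper and is not shown in the issue. A minimal sketch of what it could look like, assuming the `plyfile` package (hinted at by the commented-out import in the original post); the name and signature come from the call site, the body is an assumption:

```python
from plyfile import PlyData, PlyElement

def write_ply(path, points):
    # `points` is the structured array built above, with fields
    # x/y/z (float32) and red/green/blue (uint8).
    vertex = PlyElement.describe(points, 'vertex')
    PlyData([vertex]).write(path)  # binary little-endian PLY by default
```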
Saving result:
The uv locations that fall outside the image boundary are clipped; they occur because the depth camera's FOV is larger than the RGB camera's FOV.
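For reference, the per-point clamping in the loop above can also be done in one vectorized step; a minimal sketch assuming the same `texcoords`, `color_intrinsic`, and `color_source` as in the code (an alternative formulation, not the code from the issue):

```python
import numpy as np

# Clamp all uv-derived pixel coordinates to the color image bounds at once;
# out-of-bounds points come from the wider depth FOV.
px = np.clip((texcoords[:, 0] * color_intrinsic.width).astype(int),
             0, color_intrinsic.width - 1)
py = np.clip((texcoords[:, 1] * color_intrinsic.height).astype(int),
             0, color_intrinsic.height - 1)
rgb = color_source[py, px]  # fancy indexing gathers every point's color in one shot
```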
Link: https://github.com/IntelRealSense/librealsense/issues/11543#issue-1614921924.