"""
Read and write PCL .pcd files in python.
dimatura@cmu.edu, 2013-2018
- TODO better API for wacky operations.
- TODO add a cli for common operations.
- TODO deal properly with padding
- TODO deal properly with multicount fields
- TODO better support for rgb nonsense
"""import re
import struct
import copy
from io import BytesIO as sio
# import cStringIO as sioimport numpy as np
import warnings
import lzf
HAS_SENSOR_MSGS =Truetry:from sensor_msgs.msg import PointField
import numpy_pc2 # needs sensor_msgsexcept ImportError:
HAS_SENSOR_MSGS =False
# Public API of this module.
__all__ = ['PointCloud',
           'point_cloud_to_path',
           'point_cloud_to_buffer',
           'point_cloud_to_fileobj',
           'point_cloud_from_path',
           'point_cloud_from_buffer',
           'point_cloud_from_fileobj',
           'make_xyz_point_cloud',
           'make_xyz_rgb_point_cloud',
           'make_xyz_label_point_cloud',
           'save_txt',
           'cat_point_clouds',
           'add_fields',
           'update_field',
           'build_ascii_fmtstr',
           'encode_rgb_for_pcl',
           'decode_rgb_from_pcl',
           'save_point_cloud',
           'save_point_cloud_bin',
           'save_point_cloud_bin_compressed',
           'pcd_type_to_numpy_type',
           'numpy_type_to_pcd_type',
           ]

if HAS_SENSOR_MSGS:
    # Map ROS PointField datatypes to (pcd type char, size in bytes).
    pc2_pcd_type_mappings = [(PointField.INT8, ('I', 1)),
                             (PointField.UINT8, ('U', 1)),
                             (PointField.INT16, ('I', 2)),
                             (PointField.UINT16, ('U', 2)),
                             (PointField.INT32, ('I', 4)),
                             (PointField.UINT32, ('U', 4)),
                             (PointField.FLOAT32, ('F', 4)),
                             (PointField.FLOAT64, ('F', 8))]
    pc2_type_to_pcd_type = dict(pc2_pcd_type_mappings)
    pcd_type_to_pc2_type = dict((q, p) for (p, q) in pc2_pcd_type_mappings)
    __all__.extend(['pcd_type_to_pc2_type', 'pc2_type_to_pcd_type'])
# Map numpy dtypes to (pcd type char, size in bytes) and back.
numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),
                           (np.dtype('float64'), ('F', 8)),
                           (np.dtype('uint8'), ('U', 1)),
                           (np.dtype('uint16'), ('U', 2)),
                           (np.dtype('uint32'), ('U', 4)),
                           (np.dtype('uint64'), ('U', 8)),
                           (np.dtype('int16'), ('I', 2)),
                           (np.dtype('int32'), ('I', 4)),
                           (np.dtype('int64'), ('I', 8))]
numpy_type_to_pcd_type = dict(numpy_pcd_type_mappings)
pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)


def parse_header(lines):
    """ Parse header of PCD files.

    :param lines: list of decoded header lines (up to and including DATA).
    :return: metadata dict with keys version/fields/size/type/count/width/
        height/viewpoint/points/data; missing count/viewpoint/version get
        reasonable defaults.
    """
    metadata = {}
    for ln in lines:
        if ln.startswith('#') or len(ln) < 2:
            continue
        # raw string: '\w'/'\s' in a plain literal are invalid escapes on
        # modern Python.
        match = re.match(r'(\w+)\s+([\w\s\.]+)', ln)
        if not match:
            warnings.warn("warning: can't understand line: %s" % ln)
            continue
        key, value = match.group(1).lower(), match.group(2)
        if key == 'version':
            metadata[key] = value
        elif key in ('fields', 'type'):
            metadata[key] = value.split()
        elif key in ('size', 'count'):
            metadata[key] = list(map(int, value.split()))
        elif key in ('width', 'height', 'points'):
            metadata[key] = int(value)
        elif key == 'viewpoint':
            metadata[key] = list(map(float, value.split()))
        elif key == 'data':
            metadata[key] = value.strip().lower()
    # TODO apparently count is not required?
    # add some reasonable defaults
    if 'count' not in metadata:
        metadata['count'] = [1] * len(metadata['fields'])
    if 'viewpoint' not in metadata:
        metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
    if 'version' not in metadata:
        metadata['version'] = '.7'
    return metadata
def write_header(metadata, rename_padding=False):
    """ Given metadata as dictionary, return a string header.

    :param metadata: metadata dict as returned by parse_header/get_metadata.
    :param rename_padding: if True, '_' fields are written out as 'padding'.
    :return: PCD header string, one line per keyword, trailing newline.
    """
    if rename_padding:
        field_names = ['padding' if f == '_' else f
                       for f in metadata['fields']]
    else:
        field_names = metadata['fields']
    lines = [
        'VERSION {}'.format(metadata['version']),
        'FIELDS {}'.format(' '.join(field_names)),
        'SIZE {}'.format(' '.join(str(s) for s in metadata['size'])),
        'TYPE {}'.format(' '.join(metadata['type'])),
        'COUNT {}'.format(' '.join(str(c) for c in metadata['count'])),
        'WIDTH {}'.format(metadata['width']),
        'HEIGHT {}'.format(metadata['height']),
        'VIEWPOINT {}'.format(' '.join(str(v) for v in metadata['viewpoint'])),
        'POINTS {}'.format(metadata['points']),
        'DATA {}'.format(metadata['data']),
    ]
    return '\n'.join(lines) + '\n'
def_metadata_is_consistent(metadata):""" Sanity check for metadata. Just some basic checks.
"""
checks =[]
required =('version','fields','size','width','height','points','viewpoint','data')for f in required:if f notin metadata:print('%s required'% f)
checks.append((lambda m:all([k in m for k in required]),'missing field'))
checks.append((lambda m:len(m['type'])==len(m['count'])==len(m['fields']),'length of type, count and fields must be equal'))
checks.append((lambda m: m['height']>=0,'height must be greater than 0'))
checks.append((lambda m: m['width']>=0,'width must be greater than 0'))
checks.append((lambda m: m['points']>0,'points must be greater than 0'))
checks.append((lambda m: m['data'].lower()in('ascii','binary','binary_compressed'),'unknown data type:''should be ascii/binary/binary_compressed'))
ok =Truefor check, msg in checks:ifnot check(metadata):print('error:', msg)
ok =Falsereturn ok
def _build_dtype(metadata):
    """ Build numpy structured array dtype from pcl metadata.

    Note that fields with count > 1 are 'flattened' by creating multiple
    single-count fields.

    *TODO* allow 'proper' multi-count fields.
    """
    field_specs = []
    for f, c, t, s in zip(metadata['fields'],
                          metadata['count'],
                          metadata['type'],
                          metadata['size']):
        np_type = pcd_type_to_numpy_type[(t, s)]
        if c == 1:
            field_specs.append((f, np_type))
        else:
            # flatten a multi-count field into f_0000, f_0001, ...
            field_specs.extend([('%s_%04d' % (f, i), np_type)
                                for i in range(c)])
    return np.dtype(field_specs)
defbuild_ascii_fmtstr(pc):""" Make a format string for printing to ascii.
Note %.8f is minimum for rgb.
"""
fmtstr =[]for t, cnt inzip(pc.type, pc.count):if t =='F':
fmtstr.extend(['%.10f']*cnt)elif t =='I':
fmtstr.extend(['%d']*cnt)elif t =='U':
fmtstr.extend(['%u']*cnt)else:raise ValueError("don't know about type %s"% t)return fmtstr
def parse_ascii_pc_data(f, dtype, metadata):
    """ Use numpy to parse ascii pointcloud data. """
    return np.loadtxt(f, dtype=dtype, delimiter=' ')


def parse_binary_pc_data(f, dtype, metadata):
    """ Parse uncompressed binary pointcloud data.

    Returns a writable structured array of metadata['points'] records.
    """
    rowstep = metadata['points'] * dtype.itemsize
    # for some reason pcl adds empty space at the end of files
    buf = f.read(rowstep)
    # np.fromstring is deprecated (removed in recent numpy); frombuffer is
    # the replacement, and .copy() keeps the result writable like
    # fromstring's was.
    return np.frombuffer(buf, dtype=dtype).copy()


def parse_binary_compressed_pc_data(f, dtype, metadata):
    """ Parse lzf-compressed data.
    Format is undocumented but seems to be:
    - compressed size of data (uint32)
    - uncompressed size of data (uint32)
    - compressed data
    - junk
    """
    fmt = 'II'
    compressed_size, uncompressed_size =\
        struct.unpack(fmt, f.read(struct.calcsize(fmt)))
    compressed_data = f.read(compressed_size)
    # TODO what to use as second argument? if buf is None
    # (compressed > uncompressed)
    # should we read buf as raw binary?
    buf = lzf.decompress(compressed_data, uncompressed_size)
    if len(buf) != uncompressed_size:
        raise IOError('Error decompressing data')
    # the data is stored field-by-field
    pc_data = np.zeros(metadata['width'], dtype=dtype)
    ix = 0
    for dti in range(len(dtype)):
        dt = dtype[dti]
        # renamed from 'bytes' to avoid shadowing the builtin
        n_bytes = dt.itemsize * metadata['width']
        column = np.frombuffer(buf[ix:(ix + n_bytes)], dt)
        pc_data[dtype.names[dti]] = column
        ix += n_bytes
    return pc_data
def point_cloud_from_fileobj(f):
    """ Parse pointcloud coming from file object f.

    :param f: binary-mode file object positioned at the start of a PCD.
    :return: PointCloud instance.
    :raises ValueError: on truncated header or unknown DATA type.
    """
    header = []
    while True:
        raw = f.readline()
        if not raw:
            # EOF before any DATA line: previously this looped forever.
            raise ValueError('PCD header is missing a DATA line')
        ln = raw.strip().decode(encoding='utf-8')
        header.append(ln)
        if ln.startswith('DATA'):
            metadata = parse_header(header)
            dtype = _build_dtype(metadata)
            break
    if metadata['data'] == 'ascii':
        pc_data = parse_ascii_pc_data(f, dtype, metadata)
    elif metadata['data'] == 'binary':
        pc_data = parse_binary_pc_data(f, dtype, metadata)
    elif metadata['data'] == 'binary_compressed':
        pc_data = parse_binary_compressed_pc_data(f, dtype, metadata)
    else:
        # The original printed a warning and then crashed on an unbound
        # pc_data; raising is the honest failure mode.
        raise ValueError('DATA field is neither "ascii" or "binary" or '
                         '"binary_compressed"')
    return PointCloud(metadata, pc_data)


def point_cloud_from_path(fname):
    """ Load point cloud from a .pcd file path. """
    with open(fname, 'rb') as f:
        pc = point_cloud_from_fileobj(f)
    return pc


def point_cloud_from_buffer(buf):
    """ Load point cloud from an in-memory bytes buffer. """
    # sio is io.BytesIO; the original called sio.StringIO, which does not
    # exist on the BytesIO class (AttributeError).
    fileobj = sio(buf)
    pc = point_cloud_from_fileobj(fileobj)
    fileobj.close()  # necessary?
    return pc
def point_cloud_to_fileobj(pc, fileobj, data_compression=None):
    """ Write pointcloud as .pcd to fileobj.
    If data_compression is not None it overrides pc.data.

    All payloads are written as bytes; if fileobj is a text-mode file its
    underlying binary buffer is used, so files opened with either 'w' or
    'wb' work. (The original mixed str and bytes writes and broke on
    Python 3 for binary and binary_compressed data.)
    """
    if hasattr(fileobj, 'buffer'):
        # text-mode file: flush pending text, then write through the raw
        # binary buffer underneath it.
        fileobj.flush()
        fileobj = fileobj.buffer
    metadata = pc.get_metadata()
    if data_compression is not None:
        data_compression = data_compression.lower()
        assert(data_compression in ('ascii', 'binary', 'binary_compressed'))
        metadata['data'] = data_compression
    # header is always encoded; the original only encoded it for 'binary'.
    fileobj.write(write_header(metadata).encode())
    data_kind = metadata['data'].lower()
    if data_kind == 'ascii':
        fmtstr = build_ascii_fmtstr(pc)
        np.savetxt(fileobj, pc.pc_data, fmt=fmtstr)
    elif data_kind == 'binary':
        # tostring() is deprecated in favor of tobytes()
        fileobj.write(pc.pc_data.tobytes())
    elif data_kind == 'binary_compressed':
        # TODO
        # a '_' field is ignored by pcl and breaks compressed point clouds.
        # changing '_' to '_padding' or other name fixes this.
        # admittedly padding shouldn't be compressed in the first place.
        # reorder to column-by-column
        uncompressed_lst = []
        for fieldname in pc.pc_data.dtype.names:
            column = np.ascontiguousarray(pc.pc_data[fieldname]).tobytes()
            uncompressed_lst.append(column)
        # columns are bytes, so join with b'' (the original used '' and
        # raised TypeError on Python 3)
        uncompressed = b''.join(uncompressed_lst)
        uncompressed_size = len(uncompressed)
        buf = lzf.compress(uncompressed)
        if buf is None:
            # compression didn't shrink the file
            # TODO what do to do in this case when reading?
            buf = uncompressed
            compressed_size = uncompressed_size
        else:
            compressed_size = len(buf)
        fmt = 'II'
        fileobj.write(struct.pack(fmt, compressed_size, uncompressed_size))
        fileobj.write(buf)
    else:
        raise ValueError('unknown DATA type')
    # we can't close because if it's stringio buf then we can't get value after


def point_cloud_to_path(pc, fname):
    """ Write pointcloud to fname using the cloud's own data format. """
    with open(fname, 'wb') as f:
        point_cloud_to_fileobj(pc, f)


def point_cloud_to_buffer(pc, data_compression=None):
    """ Serialize pointcloud to a bytes buffer. """
    # sio is io.BytesIO; the original called the nonexistent sio.StringIO.
    fileobj = sio()
    point_cloud_to_fileobj(pc, fileobj, data_compression)
    return fileobj.getvalue()


def save_point_cloud(pc, fname):
    """ Save pointcloud to fname in ascii format. """
    with open(fname, 'wb') as f:
        point_cloud_to_fileobj(pc, f, 'ascii')


def save_point_cloud_bin(pc, fname):
    """ Save pointcloud to fname in binary format. """
    with open(fname, 'wb') as f:
        point_cloud_to_fileobj(pc, f, 'binary')


def save_point_cloud_bin_compressed(pc, fname):
    """ Save pointcloud to fname in binary compressed format. """
    # 'wb', not 'w': the payload is binary (the original used text mode and
    # broke on Python 3).
    with open(fname, 'wb') as f:
        point_cloud_to_fileobj(pc, f, 'binary_compressed')


def save_xyz_label(pc, fname, use_default_lbl=False):
    """ Save a simple (x y z label) pointcloud, ignoring all other features.
    Label is initialized to 1000, for an obscure program I use.
    """
    md = pc.get_metadata()
    if not use_default_lbl and ('label' not in md['fields']):
        raise Exception('label is not a field in this point cloud')
    with open(fname, 'w') as f:
        for i in range(pc.points):
            x, y, z = ['%.4f' % d for d in (
                pc.pc_data['x'][i], pc.pc_data['y'][i], pc.pc_data['z'][i])]
            lbl = '1000' if use_default_lbl else pc.pc_data['label'][i]
            f.write(' '.join((x, y, z, lbl)) + '\n')


def save_xyz_intensity_label(pc, fname, use_default_lbl=False):
    """ Save XYZI point cloud. """
    md = pc.get_metadata()
    if not use_default_lbl and ('label' not in md['fields']):
        raise Exception('label is not a field in this point cloud')
    if 'intensity' not in md['fields']:
        raise Exception('intensity is not a field in this point cloud')
    with open(fname, 'w') as f:
        for i in range(pc.points):
            x, y, z = ['%.4f' % d for d in (
                pc.pc_data['x'][i], pc.pc_data['y'][i], pc.pc_data['z'][i])]
            intensity = '%.4f' % pc.pc_data['intensity'][i]
            lbl = '1000' if use_default_lbl else pc.pc_data['label'][i]
            f.write(' '.join((x, y, z, intensity, lbl)) + '\n')


def save_txt(pc, fname, header=True):
    """ Save to csv-style text file, separated by spaces.

    TODO:
    - support multi-count fields.
    - other delimiters.
    """
    with open(fname, 'w') as f:
        if header:
            header_lst = []
            for field_name, cnt in zip(pc.fields, pc.count):
                if cnt == 1:
                    header_lst.append(field_name)
                else:
                    for c in range(cnt):
                        header_lst.append('%s_%04d' % (field_name, c))
            f.write(' '.join(header_lst) + '\n')
        fmtstr = build_ascii_fmtstr(pc)
        np.savetxt(f, pc.pc_data, fmt=fmtstr)


def update_field(pc, field, pc_data):
    """ Updates field in-place. """
    pc.pc_data[field] = pc_data
    return pc
def add_fields(pc, metadata, pc_data):
    """ Builds copy of pointcloud with extra fields.

    Multi-count fields are sketchy, yet again.

    :param pc: source PointCloud.
    :param metadata: metadata dict describing only the NEW fields
        (fields/count/size/type lists).
    :param pc_data: structured array holding the new fields' data, same
        number of points as pc.
    :return: new PointCloud with the combined fields.
    """
    if len(set(metadata['fields']).intersection(set(pc.fields))) > 0:
        raise Exception("Fields with that name exist.")
    if pc.points != len(pc_data):
        raise Exception("Mismatch in number of points.")
    new_metadata = pc.get_metadata()
    new_metadata['fields'].extend(metadata['fields'])
    new_metadata['count'].extend(metadata['count'])
    new_metadata['size'].extend(metadata['size'])
    new_metadata['type'].extend(metadata['type'])
    # parse metadata to add
    # TODO factor this
    fieldnames, typenames = [], []
    for f, c, t, s in zip(metadata['fields'],
                          metadata['count'],
                          metadata['type'],
                          metadata['size']):
        np_type = pcd_type_to_numpy_type[(t, s)]
        if c == 1:
            fieldnames.append(f)
            typenames.append(np_type)
        else:
            fieldnames.extend(['%s_%04d' % (f, i) for i in range(c)])
            typenames.extend([np_type] * c)
    # list() is required: on Python 3 zip returns an iterator and
    # concatenating it to a list below raises TypeError.
    dtype = list(zip(fieldnames, typenames))
    # new dtype. could be inferred?
    new_dtype = [(f, pc.pc_data.dtype[f])
                 for f in pc.pc_data.dtype.names] + dtype
    new_data = np.empty(len(pc.pc_data), new_dtype)
    for n in pc.pc_data.dtype.names:
        new_data[n] = pc.pc_data[n]
    for n, n_tmp in zip(fieldnames, pc_data.dtype.names):
        new_data[n] = pc_data[n_tmp]
    # TODO maybe just all the metadata in the dtype.
    # TODO maybe use composite structured arrays for fields with count > 1
    newpc = PointCloud(new_metadata, new_data)
    return newpc
def cat_point_clouds(pc1, pc2):
    """ Concatenate two point clouds into bigger point cloud.
    Point clouds must have same metadata.
    """
    if len(pc1.fields) != len(pc2.fields):
        raise ValueError("Pointclouds must have same fields")
    combined_md = pc1.get_metadata()
    combined_data = np.concatenate((pc1.pc_data, pc2.pc_data))
    # TODO this only makes sense for unstructured pc?
    combined_md['width'] = pc1.width + pc2.width
    combined_md['points'] = pc1.points + pc2.points
    return PointCloud(combined_md, combined_data)
def make_xyz_point_cloud(xyz, metadata=None):
    """ Make a pointcloud object from xyz array.
    xyz array is cast to float32.
    """
    md = {'version': .7,
          'fields': ['x', 'y', 'z'],
          'size': [4, 4, 4],
          'type': ['F', 'F', 'F'],
          'count': [1, 1, 1],
          'width': len(xyz),
          'height': 1,
          'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
          'points': len(xyz),
          'data': 'binary'}
    if metadata is not None:
        md.update(metadata)
    xyz = xyz.astype(np.float32)
    # reinterpret the Nx3 float32 array as a structured array of (x, y, z)
    structured_dt = np.dtype([('x', np.float32),
                              ('y', np.float32),
                              ('z', np.float32)])
    pc_data = xyz.view(structured_dt)
    return PointCloud(md, pc_data)
def make_xyz_rgb_point_cloud(xyz_rgb, metadata=None):
    """ Make a pointcloud object from xyz array.
    xyz array is assumed to be float32.
    rgb is assumed to be encoded as float32 according to pcl conventions.
    """
    md = {'version': .7,
          'fields': ['x', 'y', 'z', 'rgb'],
          'count': [1, 1, 1, 1],
          'width': len(xyz_rgb),
          'height': 1,
          'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
          'points': len(xyz_rgb),
          'type': ['F', 'F', 'F', 'F'],
          'size': [4, 4, 4, 4],
          'data': 'binary'}
    if xyz_rgb.dtype != np.float32:
        raise ValueError('array must be float32')
    if metadata is not None:
        md.update(metadata)
    # reinterpret the Nx4 float32 array as a structured (x, y, z, rgb) array
    structured_dt = np.dtype([('x', np.float32),
                              ('y', np.float32),
                              ('z', np.float32),
                              ('rgb', np.float32)])
    pc_data = xyz_rgb.view(structured_dt).squeeze()
    return PointCloud(md, pc_data)
def encode_rgb_for_pcl(rgb):
    """ Encode bit-packed RGB for use with PCL.

    :param rgb: Nx3 uint8 array with RGB values.
    :rtype: Nx1 float32 array with bit-packed RGB, for PCL.
    """
    assert(rgb.dtype == np.uint8)
    assert(rgb.ndim == 2)
    assert(rgb.shape[1] == 3)
    wide = rgb.astype(np.uint32)
    packed = np.array((wide[:, 0] << 16) | (wide[:, 1] << 8) | (wide[:, 2] << 0),
                      dtype=np.uint32)
    # reinterpret the packed uint32 bits as float32 (no value conversion)
    packed.dtype = np.float32
    return packed
def decode_rgb_from_pcl(rgb):
    """ Decode the bit-packed RGBs used by PCL.

    :param rgb: An Nx1 array.
    :rtype: Nx3 uint8 array with one column per color.
    """
    packed = rgb.copy()
    # reinterpret the float32 bits as uint32 (no value conversion)
    packed.dtype = np.uint32
    rgb_arr = np.zeros((len(packed), 3), dtype=np.uint8)
    rgb_arr[:, 0] = np.asarray((packed >> 16) & 255, dtype=np.uint8)
    rgb_arr[:, 1] = np.asarray((packed >> 8) & 255, dtype=np.uint8)
    rgb_arr[:, 2] = np.asarray(packed & 255, dtype=np.uint8)
    return rgb_arr
def make_xyz_label_point_cloud(xyzl, label_type='f', label='label'):
    """ Make XYZL point cloud from numpy array.

    TODO i labels?
    """
    md = {'version': .7,
          'fields': ['x', 'y', 'z', label],
          'count': [1, 1, 1, 1],
          'width': len(xyzl),
          'height': 1,
          'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
          'points': len(xyzl),
          'data': 'ASCII'}
    kind = label_type.lower()
    if kind == 'f':
        md['size'] = [4, 4, 4, 4]
        md['type'] = ['F', 'F', 'F', 'F']
    elif kind == 'u':
        md['size'] = [4, 4, 4, 1]
        md['type'] = ['F', 'F', 'F', 'U']
    else:
        raise ValueError('label type must be F or U')
    # TODO use .view()
    xyzl = xyzl.astype(np.float32)
    dt = np.dtype([('x', np.float32), ('y', np.float32), ('z', np.float32),
                   (label, np.float32)])
    # NOTE(review): for label_type 'u' the metadata advertises a 1-byte 'U'
    # field but the data is stored as float32 — kept as in the original.
    pc_data = np.rec.fromarrays([xyzl[:, 0], xyzl[:, 1], xyzl[:, 2],
                                 xyzl[:, 3]], dtype=dt)
    return PointCloud(md, pc_data)
class PointCloud(object):
    """ Wrapper for point cloud data.

    The variable members of this class parallel the ones used by
    the PCD metadata (and similar to PCL and ROS PointCloud2 messages),

    ``pc_data`` holds the actual data as a structured numpy array.

    The other relevant metadata variables are:

    - ``version``: Version, usually .7
    - ``fields``: Field names, e.g. ``['x', 'y' 'z']``.
    - ``size``: Field sizes in bytes, e.g. ``[4, 4, 4]``.
    - ``count``: Counts per field e.g. ``[1, 1, 1]``. NB: Multi-count field
      support is sketchy.
    - ``width``: Number of points, for unstructured point clouds (assumed by
      most operations).
    - ``height``: 1 for unstructured point clouds (again, what we assume most
      of the time.
    - ``viewpoint``: A pose for the viewpoint of the cloud, as
      x y z qw qx qy qz, e.g. ``[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]``.
    - ``points``: Number of points.
    - ``type``: Data type of each field, e.g. ``[F, F, F]``.
    - ``data``: Data storage format. One of ``ascii``, ``binary`` or
      ``binary_compressed``.

    See `PCL docs
    <http://pointclouds.org/documentation/tutorials/pcd_file_format.php>`__
    for more information.
    """

    def __init__(self, metadata, pc_data):
        # Snapshot the key list: a live dict view would silently change if
        # the caller mutates their metadata dict afterwards.
        self.metadata_keys = list(metadata.keys())
        self.__dict__.update(metadata)
        self.pc_data = pc_data
        self.check_sanity()

    def get_metadata(self):
        """ returns copy of metadata """
        metadata = {}
        for k in self.metadata_keys:
            metadata[k] = copy.copy(getattr(self, k))
        return metadata

    def check_sanity(self):
        # pdb.set_trace()
        md = self.get_metadata()
        assert(_metadata_is_consistent(md))
        assert(len(self.pc_data) == self.points)
        # assert(self.width*self.height == self.points)
        assert(len(self.fields) == len(self.count))
        assert(len(self.fields) == len(self.type))

    def save(self, fname):
        self.save_pcd(fname, 'ascii')

    def save_pcd(self, fname, compression=None, **kwargs):
        if 'data_compression' in kwargs:
            warnings.warn('data_compression keyword is deprecated for'
                          ' compression')
            compression = kwargs['data_compression']
        with open(fname, 'w') as f:
            point_cloud_to_fileobj(self, f, compression)

    def save_pcd_to_fileobj(self, fileobj, compression=None, **kwargs):
        if 'data_compression' in kwargs:
            warnings.warn('data_compression keyword is deprecated for'
                          ' compression')
            compression = kwargs['data_compression']
        point_cloud_to_fileobj(self, fileobj, compression)

    def save_pcd_to_buffer(self, compression=None, **kwargs):
        if 'data_compression' in kwargs:
            warnings.warn('data_compression keyword is deprecated for'
                          ' compression')
            compression = kwargs['data_compression']
        return point_cloud_to_buffer(self, compression)

    def save_txt(self, fname):
        save_txt(self, fname)

    def save_xyz_label(self, fname, **kwargs):
        save_xyz_label(self, fname, **kwargs)

    def save_xyz_intensity_label(self, fname, **kwargs):
        save_xyz_intensity_label(self, fname, **kwargs)

    def copy(self):
        new_pc_data = np.copy(self.pc_data)
        new_metadata = self.get_metadata()
        return PointCloud(new_metadata, new_pc_data)

    def to_msg(self):
        if not HAS_SENSOR_MSGS:
            raise Exception('ROS sensor_msgs not found')
        # TODO is there some metadata we want to attach?
        return numpy_pc2.array_to_pointcloud2(self.pc_data)

    @staticmethod
    def from_path(fname):
        return point_cloud_from_path(fname)

    @staticmethod
    def from_fileobj(fileobj):
        return point_cloud_from_fileobj(fileobj)

    @staticmethod
    def from_buffer(buf):
        return point_cloud_from_buffer(buf)

    @staticmethod
    def from_array(arr):
        """ create a PointCloud object from an array. """
        pc_data = arr.copy()
        md = {'version': .7,
              'fields': [],
              'size': [],
              'count': [],
              'width': 0,
              'height': 1,
              'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
              'points': 0,
              'type': [],
              'data': 'binary_compressed'}
        # list, not the dtype.names tuple, so fields stays mutable like the
        # other metadata lists (e.g. for add_fields).
        md['fields'] = list(pc_data.dtype.names)
        for field in md['fields']:
            type_, size_ =\
                numpy_type_to_pcd_type[pc_data.dtype.fields[field][0]]
            md['type'].append(type_)
            md['size'].append(size_)
            # TODO handle multicount
            md['count'].append(1)
        md['width'] = len(pc_data)
        md['points'] = len(pc_data)
        return PointCloud(md, pc_data)

    @staticmethod
    def from_msg(msg, squeeze=True):
        """ from pointcloud2 msg
        squeeze: fix when clouds get 1 as first dim
        """
        if not HAS_SENSOR_MSGS:
            raise NotImplementedError('ROS sensor_msgs not found')
        md = {'version': .7,
              'fields': [],
              'size': [],
              'count': [],
              'width': msg.width,
              'height': msg.height,
              'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
              'points': 0,
              'type': [],
              'data': 'binary_compressed'}
        for field in msg.fields:
            md['fields'].append(field.name)
            t, s = pc2_type_to_pcd_type[field.datatype]
            md['type'].append(t)
            md['size'].append(s)
            # TODO handle multicount correctly
            if field.count > 1:
                warnings.warn('fields with count > 1 are not well tested')
            md['count'].append(field.count)
        pc_array = numpy_pc2.pointcloud2_to_array(msg)
        pc_data = pc_array.reshape(-1)
        md['height'], md['width'] = pc_array.shape
        md['points'] = len(pc_data)
        return PointCloud(md, pc_data)
# ---------------------------------------------------------------------------
# ply.py — a second module concatenated below (PLY reader/writer)
# ---------------------------------------------------------------------------
#
#      0===============================0
#      |    PLY files reader/writer    |
#      0===============================0
#
# function to read/write .ply files
#
# Hugues THOMAS - 10/02/2017
#

# Basic libs
import numpy as np
import sys

# Define PLY types: PLY type name (bytes) -> numpy dtype code.
ply_dtypes = dict([(b'int8', 'i1'),
                   (b'char', 'i1'),
                   (b'uint8', 'u1'),
                   (b'uchar', 'u1'),
                   (b'int16', 'i2'),
                   (b'short', 'i2'),
                   (b'uint16', 'u2'),
                   (b'ushort', 'u2'),
                   (b'int32', 'i4'),
                   (b'int', 'i4'),
                   (b'uint32', 'u4'),
                   (b'uint', 'u4'),
                   (b'float32', 'f4'),
                   (b'float', 'f4'),
                   (b'float64', 'f8'),
                   (b'double', 'f8')])

# Numpy reader format: PLY format keyword -> numpy byte-order prefix.
valid_formats = {'ascii': '', 'binary_big_endian': '>',
                 'binary_little_endian': '<'}


def parse_header(plyfile, ext):
    """ Parse a (non-mesh) PLY header; return (num_points, properties).

    NOTE(review): this shadows the PCD parse_header defined above — this
    section was originally the separate module ply.py.

    :param plyfile: binary file object positioned just after the format line.
    :param ext: numpy byte-order prefix from valid_formats.
    """
    line = []
    properties = []
    num_points = None
    while b'end_header' not in line and line != b'':
        line = plyfile.readline()
        if b'element' in line:
            line = line.split()
            num_points = int(line[2])
        elif b'property' in line:
            line = line.split()
            properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
    return num_points, properties
def parse_mesh_header(plyfile, ext):
    """ Parse a mesh PLY header.

    :param plyfile: binary file object positioned just after the format line.
    :param ext: numpy byte-order prefix from valid_formats.
    :return: (num_points, num_faces, vertex_properties).
    :raises ValueError: on an unsupported face property layout.
    """
    line = []
    vertex_properties = []
    num_points = None
    num_faces = None
    current_element = None
    while b'end_header' not in line and line != b'':
        line = plyfile.readline()
        # Find point element
        if b'element vertex' in line:
            current_element = 'vertex'
            line = line.split()
            num_points = int(line[2])
        elif b'element face' in line:
            current_element = 'face'
            line = line.split()
            num_faces = int(line[2])
        elif b'property' in line:
            if current_element == 'vertex':
                line = line.split()
                vertex_properties.append((line[2].decode(),
                                          ext + ply_dtypes[line[1]]))
            elif current_element == 'face':
                # The original repeated the 'vertex' condition here, making
                # this validation dead code, and compared the bytes line
                # against a str prefix.
                if not line.startswith(b'property list uchar int'):
                    raise ValueError('Unsupported faces property : '
                                     + line.decode())
    return num_points, num_faces, vertex_properties
def read_ply(filename, triangular_mesh=False):
    """
    Read ".ply" files

    Parameters
    ----------
    filename : string
        the name of the file to read.
    triangular_mesh : bool
        if True, also read an 'element face' section and return the faces.

    Returns
    -------
    result : array
        data stored in the file (or [vertex_data, faces] for meshes)

    Examples
    --------
    Store data in file

    >>> points = np.random.rand(5, 3)
    >>> values = np.random.randint(2, size=10)
    >>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values'])

    Read the file

    >>> data = read_ply('example.ply')
    >>> values = data['values']
    array([0, 0, 1, 1, 0])
    >>> points = np.vstack((data['x'], data['y'], data['z'])).T
    array([[ 0.466  0.595  0.324]
           [ 0.538  0.407  0.654]
           [ 0.850  0.018  0.988]
           [ 0.395  0.394  0.363]
           [ 0.873  0.996  0.092]])
    """
    with open(filename, 'rb') as plyfile:
        # Check that the file starts with 'ply'
        # (fixed typo in the original error message: 'whith' -> 'with')
        if b'ply' not in plyfile.readline():
            raise ValueError('The file does not start with the word ply')
        # get binary_little/big or ascii
        fmt = plyfile.readline().split()[1].decode()
        if fmt == "ascii":
            raise ValueError('The file is not binary')
        # get extension for building the numpy dtypes
        ext = valid_formats[fmt]
        # PointCloud reader vs mesh reader
        if triangular_mesh:
            # Parse header
            num_points, num_faces, properties = parse_mesh_header(plyfile, ext)
            # Get point data
            vertex_data = np.fromfile(plyfile, dtype=properties,
                                      count=num_points)
            # Get face data
            face_properties = [('k', ext + 'u1'),
                               ('v1', ext + 'i4'),
                               ('v2', ext + 'i4'),
                               ('v3', ext + 'i4')]
            faces_data = np.fromfile(plyfile, dtype=face_properties,
                                     count=num_faces)
            # Return vertex data and concatenated faces
            faces = np.vstack((faces_data['v1'], faces_data['v2'],
                               faces_data['v3'])).T
            data = [vertex_data, faces]
        else:
            # Parse header
            num_points, properties = parse_header(plyfile, ext)
            # Get data
            data = np.fromfile(plyfile, dtype=properties, count=num_points)
    return data
def header_properties(field_list, field_names):
    """ Build PLY header lines for the vertex element and its properties.

    :param field_list: list of 2-D numpy arrays (columns are fields).
    :param field_names: flat list of names, one per column overall.
    :return: list of header lines.
    """
    # First line describing element vertex
    lines = ['element vertex %d' % field_list[0].shape[0]]
    # One property line per column, consuming names in order
    name_idx = 0
    for fields in field_list:
        for column in fields.T:
            lines.append('property %s %s' % (column.dtype.name,
                                             field_names[name_idx]))
            name_idx += 1
    return lines
def write_ply(filename, field_list, field_names, triangular_faces=None):
    """
    Write ".ply" files

    Parameters
    ----------
    filename : string
        the name of the file to which the data is saved. A '.ply' extension
        will be appended to the file name if it does no already have one.
    field_list : list, tuple, numpy array
        the fields to be saved in the ply file. Either a numpy array, a
        list of numpy arrays or a tuple of numpy arrays. Each 1D numpy
        array and each column of 2D numpy arrays are considered as one
        field.
    field_names : list
        the name of each fields as a list of strings. Has to be the same
        length as the number of fields.
    triangular_faces : numpy array, optional
        Mx3 integer array of triangle vertex indices.

    Examples
    --------
    >>> points = np.random.rand(10, 3)
    >>> write_ply('example1.ply', points, ['x', 'y', 'z'])

    >>> values = np.random.randint(2, size=10)
    >>> write_ply('example2.ply', [points, values], ['x', 'y', 'z', 'values'])

    >>> colors = np.random.randint(255, size=(10,3), dtype=np.uint8)
    >>> field_names = ['x', 'y', 'z', 'red', 'green', 'blue', 'values']
    >>> write_ply('example3.ply', [points, colors, values], field_names)
    """
    # Format list input to the right form
    if type(field_list) == list or type(field_list) == tuple:
        field_list = list(field_list)
    else:
        field_list = list((field_list,))
    for i, field in enumerate(field_list):
        if field.ndim < 2:
            field_list[i] = field.reshape(-1, 1)
        if field.ndim > 2:
            print('fields have more than 2 dimensions')
            return False

    # check all fields have the same number of data
    n_points = [field.shape[0] for field in field_list]
    if not np.all(np.equal(n_points, n_points[0])):
        print('wrong field dimensions')
        return False

    # Check if field_names and field_list have same nb of column
    n_fields = np.sum([field.shape[1] for field in field_list])
    if (n_fields != len(field_names)):
        print('wrong number of field names')
        return False

    # Add extension if not there
    if not filename.endswith('.ply'):
        filename += '.ply'

    # open in text mode to write the header
    with open(filename, 'w') as plyfile:
        # First magical word, then encoding format
        header = ['ply',
                  'format binary_' + sys.byteorder + '_endian 1.0']
        # Points properties description
        header.extend(header_properties(field_list, field_names))
        # Add faces if needded
        if triangular_faces is not None:
            header.append('element face {:d}'.format(
                triangular_faces.shape[0]))
            header.append('property list uchar int vertex_indices')
        header.append('end_header')
        for line in header:
            plyfile.write("%s\n" % line)

    # open in binary/append to use tofile
    with open(filename, 'ab') as plyfile:
        # Create a structured array mirroring the column layout
        type_list = []
        idx = 0
        for fields in field_list:
            for column in fields.T:
                type_list += [(field_names[idx], column.dtype.str)]
                idx += 1
        data = np.empty(field_list[0].shape[0], dtype=type_list)
        idx = 0
        for fields in field_list:
            for column in fields.T:
                data[field_names[idx]] = column
                idx += 1
        data.tofile(plyfile)

        if triangular_faces is not None:
            triangular_faces = triangular_faces.astype(np.int32)
            type_list = [('k', 'uint8')] + [(str(ind), 'int32')
                                            for ind in range(3)]
            data = np.empty(triangular_faces.shape[0], dtype=type_list)
            # 'k' is the per-face vertex count (always 3 for triangles)
            data['k'] = np.full((triangular_faces.shape[0],), 3,
                                dtype=np.uint8)
            data['0'] = triangular_faces[:, 0]
            data['1'] = triangular_faces[:, 1]
            data['2'] = triangular_faces[:, 2]
            data.tofile(plyfile)

    return True


def describe_element(name, df):
    """ Takes the columns of the dataframe and builds a ply-like description

    Parameters
    ----------
    name: str
    df: pandas DataFrame

    Returns
    -------
    element: list[str]
    """
    property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}
    element = ['element ' + name + ' ' + str(len(df))]
    if name == 'face':
        element.append("property list uchar int points_indices")
    else:
        for i in range(len(df.columns)):
            # get first letter of dtype to infer format
            f = property_formats[str(df.dtypes[i])[0]]
            element.append('property ' + f + ' ' + df.columns.values[i])
    return element
def read_ply_header(filename, triangular_mesh=False):
    """
    Read ".ply" files header

    Parameters
    ----------
    filename : string
        the name of the file to read.

    Returns
    -------
    result : numpy dtype
        structured dtype describing the vertex properties; equals the dtype
        of the array read_ply would return, without reading the data.

    Examples
    --------
    Store data in file

    >>> points = np.random.rand(5, 3)
    >>> values = np.random.randint(2, size=10)
    >>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values'])

    Read the file

    >>> header = read_ply_header('example.ply')
    [('x', '<f4'),
     ('y', '<f4'),
     ('z', '<f4'),
     ('value', '<u4')]
    """
    with open(filename, 'rb') as plyfile:
        # Check that the file starts with 'ply'
        # (fixed typo in the original error message: 'whith' -> 'with')
        if b'ply' not in plyfile.readline():
            raise ValueError('The file does not start with the word ply')
        # get binary_little/big or ascii
        fmt = plyfile.readline().split()[1].decode()
        if fmt == "ascii":
            raise ValueError('The file is not binary')
        # get extension for building the numpy dtypes
        ext = valid_formats[fmt]
        # PointCloud reader vs mesh reader
        if triangular_mesh:
            num_points, num_faces, properties = parse_mesh_header(plyfile, ext)
        else:
            num_points, properties = parse_header(plyfile, ext)
    return np.dtype(properties)
# Usage (ply.py)
# --------------
#   from ply import read_ply, write_ply, read_ply_header
#   data = read_ply('test.ply')
#   header = read_ply_header('test.ply')
#   # header equals data.dtype; if the data itself is not needed,
#   # read_ply_header is faster and uses less memory.
#   names = list(data.dtype.names)
#   typer = [data.dtype[i] for i in names]
#   write_ply('test_out.ply', [data[h] for h in names], names)
# Usage (pypcd)
# -------------
#   import pypcd
#   pc = pypcd.PointCloud.from_path('test.pcd')
#   data = pc.pc_data
#   names = list(data.dtype.names)
#   typer = [data.dtype[i] for i in names]