[Common Code Summary] Generating various configuration files, using RealSense, and pose-estimation essentials


Preface

When writing code I often like to create a work.py to verify a piece of functionality first, and merge it into the main code once it works. So I am recording some of the code I have used here, so it can be reused directly later.

I. Generating point cloud files with pyrealsense2

Reference link
The code is as follows:

import pyrealsense2 as rs
import open3d as o3d
import cv2
import numpy as np

# camera configuration
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)

# depth sensor parameters, including the visual preset and depth_scale
depth_sensor = profile.get_device().first_depth_sensor()
depth_sensor.set_option(rs.option.visual_preset, 3)
depth_scale = depth_sensor.get_depth_scale()
clipping_distance_in_meters = 8  # 8 meters
clipping_distance = clipping_distance_in_meters / depth_scale
# align depth to color
align_to = rs.stream.color
align = rs.align(align_to)

print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<   Start Detection >>>>>>>>>>>>>>>>>>>>>>>>>>>>')
while True:
    frames = pipeline.wait_for_frames()
    aligned_frames = align.process(frames)
    aligned_depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()
    # read the images
    depth_image = np.asanyarray(aligned_depth_frame.get_data())
    color_image = np.asanyarray(color_frame.get_data())
    # read the intrinsics
    intrinsics = color_frame.profile.as_video_stream_profile().intrinsics
    o3d_inter = o3d.camera.PinholeCameraIntrinsic(intrinsics.width, intrinsics.height, intrinsics.fx, intrinsics.fy,
                                                  intrinsics.ppx, intrinsics.ppy)
    # build and display the point cloud
    color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
    rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
        o3d.geometry.Image(color_image.copy()),
        o3d.geometry.Image(depth_image),
        depth_scale=1.0 / depth_scale,
        depth_trunc=clipping_distance_in_meters,
        convert_rgb_to_intensity=False)
    pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, o3d_inter)
    o3d.visualization.draw_geometries([pcd])
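
To actually write a point cloud file, as the section title suggests, a save call can be appended at the end of the loop body; the output path here is an arbitrary choice:

    # save the current cloud to disk, then stop the pipeline and exit the loop
    o3d.io.write_point_cloud('/tmp/cloud.ply', pcd)
    pipeline.stop()
    break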

II. Euler angles to rotation matrix

The code is as follows:

import numpy as np
import math

# Euler angles (radians): theta[0] about X, theta[1] about Y, theta[2] about Z
theta = np.array([1.9309721e+00, 6.4933435e-03, -2.5455009e-03])

# rotation about the X axis
R_x = np.array([[1, 0, 0],
                [0, math.cos(theta[0]), -math.sin(theta[0])],
                [0, math.sin(theta[0]), math.cos(theta[0])]
                ])
# rotation about the Y axis
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
                [0, 1, 0],
                [-math.sin(theta[1]), 0, math.cos(theta[1])]
                ])
# rotation about the Z axis
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
                [math.sin(theta[2]), math.cos(theta[2]), 0],
                [0, 0, 1]
                ])
# compose: R = Rz @ Ry @ Rx, i.e. extrinsic rotations applied in X, Y, Z order
R = np.dot(R_z, np.dot(R_y, R_x))
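
As a cross-check (assuming SciPy is available), the same matrix can be obtained from scipy.spatial.transform.Rotation; R = Rz @ Ry @ Rx corresponds to extrinsic x-y-z Euler angles:

from scipy.spatial.transform import Rotation

# extrinsic x-y-z angles reproduce R = Rz @ Ry @ Rx
R_scipy = Rotation.from_euler('xyz', theta).as_matrix()
print(np.allclose(R, R_scipy))  # expected: True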

III. Pose estimation

1. Computing model_info from a PLY file

Pose estimation often uses the diameter of the PLY model; here is how to compute it.
The code is as follows:

import os
import numpy as np
from plyfile import PlyData

model_path = os.path.join('/home/sunh/6D_ws/AAE_torch/work_space/model/omron1.ply')
ply = PlyData.read(model_path)
data = ply.elements[0].data
x = data['x']
y = data['y']
z = data['z']
x_size = np.max(x) - np.min(x)
y_size = np.max(y) - np.min(y)
z_size = np.max(z) - np.min(z)
print(np.min(x))
print(x_size)
print('-----------------------------------------------------------------')
print(np.min(y))
print(y_size)
print('-----------------------------------------------------------------')
print(np.min(z))
print(z_size)
print('-----------------------------------------------------------------')
# bounding-box diagonal, often used as the model "diameter"
print(np.sqrt(x_size**2 + y_size**2 + z_size**2))
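
Note that the value printed above is the bounding-box diagonal, which is only an upper bound on the BOP-style diameter (the maximum pairwise vertex distance). A minimal sketch of the exact computation, assuming SciPy is available (the maximum is attained on the convex hull, which keeps the pairwise distances tractable):

from scipy.spatial import ConvexHull
from scipy.spatial.distance import pdist

pts = np.stack([x, y, z], axis=-1)         # (N, 3) vertices from the PLY above
hull_pts = pts[ConvexHull(pts).vertices]   # the diameter is attained on the hull
print('diameter:', pdist(hull_pts).max())  # exact maximum pairwise distance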

2. Quickly generating XML for RetinaNet training

Pose estimation usually needs a 2D detection step first. In rendered images the bounding box is easy to obtain, so the rendered images are used directly; this script writes an XML for each image, in the same format labelImg produces.
The code is as follows:

import os.path as osp
import xml.etree.ElementTree as ET
import mmcv

# a labelImg-style XML is used as the template
tree = ET.parse('/home/sunh/6D_ws/other_code/EfficientPose/Linemod_preprocessed/data/00/rgb0/0.xml')

scene_root = osp.join("/home/sunh/6D_ws/other_code/GDR-Net-main/datasets/BOP_DATASETS/lm/test/000000/scene_gt_info.json")
assert osp.exists(scene_root), scene_root
gt_dict = mmcv.load(scene_root)  # key is str(image_id)

root = tree.getroot()
for i in range(0, 1553):
    # get the bbox from the render
    data_idx = gt_dict['{}'.format(i)]
    box = data_idx[0]['bbox_obj']

    sv_path = '/home/sunh/6D_ws/labelImg/data/cube/JPEGImages/{}.xml'.format(i)
    # change the <filename>
    root.find('filename').text = '{}.jpg'.format(i)
    # change the <path>
    root.find('path').text = '/home/sunh/6D_ws/labelImg/data/cube/JPEGImages/{}.jpg'.format(i)
    # change the <object> <bndbox>: bbox_obj is (x, y, w, h)
    for obj in root.iter('object'):
        xml_box = obj.find('bndbox')
        xml_box.find('xmin').text = str(box[0])
        xml_box.find('ymin').text = str(box[1])
        xml_box.find('xmax').text = str(box[0] + box[2])
        xml_box.find('ymax').text = str(box[1] + box[3])
    tree.write(sv_path)
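
If no labelImg template XML is at hand, an equivalent Pascal-VOC-style annotation can also be built from scratch with ElementTree. A minimal sketch; the paths, image size, and class name are placeholders to adapt:

import xml.etree.ElementTree as ET

def make_voc_xml(filename, path, width, height, name, box):
    # box is (x, y, w, h), like the 'bbox_obj' field used above
    ann = ET.Element('annotation')
    ET.SubElement(ann, 'folder').text = 'JPEGImages'
    ET.SubElement(ann, 'filename').text = filename
    ET.SubElement(ann, 'path').text = path
    size = ET.SubElement(ann, 'size')
    ET.SubElement(size, 'width').text = str(width)
    ET.SubElement(size, 'height').text = str(height)
    ET.SubElement(size, 'depth').text = '3'
    obj = ET.SubElement(ann, 'object')
    ET.SubElement(obj, 'name').text = name
    bndbox = ET.SubElement(obj, 'bndbox')
    ET.SubElement(bndbox, 'xmin').text = str(box[0])
    ET.SubElement(bndbox, 'ymin').text = str(box[1])
    ET.SubElement(bndbox, 'xmax').text = str(box[0] + box[2])
    ET.SubElement(bndbox, 'ymax').text = str(box[1] + box[3])
    return ET.ElementTree(ann)

make_voc_xml('0.jpg', '/tmp/0.jpg', 640, 480, 'cube', [10, 20, 30, 40]).write('/tmp/0.xml')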
3. Generating the pose ground-truth info.yml for EfficientPose

The code is as follows:

import os
import yaml
import numpy as np

sence_gt = np.load('/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/cube/pose_gt.npy')

scence = {}

yaml_path = os.path.join("/home/sunh/6D_ws/other_code/EfficientPose/dataset/data/00/info.yml")
with open(yaml_path) as fid:
    yaml_dic = yaml.safe_load(fid)
print(yaml_dic[0])

for i in range(0, 5):
    RT = sence_gt[i]
    R = RT[0:3, 0:3].flatten()
    t = RT[0:3, 3]
    R = [float(v) for v in R]
    t = [float(t[0]) * 1000, float(t[1]) * 1000, float(t[2]) * 1000]  # m -> mm
    data = {}
    data["cam_R_m2c"] = R
    data["cam_t_m2c"] = t
    data["obj_bb"] = [10, 20, 30, 40]
    data["obj_id"] = 0
    scence[i] = [data]

with open(yaml_path, 'w') as f:  # output path; adjust as needed
    yaml.dump(scence, f)
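
To sanity-check the file that was just written, a minimal sketch: re-load it and verify that the stored rotation is orthonormal.

import numpy as np
import yaml

with open(yaml_path) as f:
    gt = yaml.safe_load(f)

R0 = np.array(gt[0][0]['cam_R_m2c']).reshape(3, 3)
print(np.allclose(R0 @ R0.T, np.eye(3), atol=1e-5))  # expected: True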

IV. Image processing

These are all useful bits, but somewhat messy…

import os
import os.path as osp
import pickle
import pickle as pkl
from PIL import Image
import numpy as np
# import cv2
# import mmcv
import tqdm





###############################################################################################################
#######################################    write the json    #################################################
# import json
#
# filename = os.path.join("/home/robolab/6D_ws/raster_triangle/Linemod_preprocessed/renders/ape_gt.json")
# data_all = {}
# R = [0, 0, 0, 0.5, -0.0081, -0.812001, -103, 14,-99]
# t = [-105.3577515, -117.52119142, 1014.8770132]
# for i in range(0,5):
#     data = {}  # new dict each iteration, otherwise all entries share one object
#     data['cam_R_m2c'] = R
#     data['cam_t_m2c'] = t
#     data_all[i] = [data]
#
# with open(filename, 'w') as file_obj:
#     json.dump(data_all, file_obj)


##############################################################################################
#######################################    create txt and write indices    ##################################
##############################################################################################
# ftrain = open('/media/sunh/Samsung_T5/6D_data/my_Dataset/omron/train.txt', 'w')
# for i in range(0,3000):
#     name = '{}'.format(i) + '\n'
#     ftrain.write(name)
# ftrain.close()

# ftrain = open('/media/sunh/Samsung_T5/6D_data/other_code/EfficientPose/Linemod_preprocessed/data/00/test.txt', 'w')
# for i in range(0,3000):
#     ftrain.write('{}\n'.format(i))
# ftrain.close()




##############################################################################################
#######################################   learn the json    ##################################
##############################################################################################
# scene_root = osp.join( "/home/robolab/6D_ws/GDR-Net/datasets/BOP_DATASETS/lm/train_pbr/000000/scene_gt_info.json")
# assert osp.exists(scene_root), scene_root
# gt_dict = mmcv.load(scene_root)  # key is str(image_id)
# print(gt_dict['0'])
# for anno_i, anno in enumerate(gt_dict['0']):
#     print('####################################::',anno)  #  {'bbox_obj': [-1, -1, -1, -1], 'bbox_visib': [-1, -1, -1, -1], 'px_count_all': 1420, 'px_count_valid': 1420, 'px_count_visib': 0, 'visib_fract': 0.0}
#     print('####################################::',anno_i)  # 0,1,2,
#     bbox_obj = anno["bbox_obj"]
#     px_count_all = anno["px_count_all"]
#     px_count_valid = anno["px_count_valid"]
#     px_count_visib = anno["px_count_visib"]
#
#     print('####################################::',bbox_obj)
#     img = cv2.imread('/home/robolab/6D_ws/GDR-Net/datasets/BOP_DATASETS/lm/train_pbr/000000/rgb/000000.jpg',1)
#     cv2.circle(img, (100, 100), 4, (0, 0, 255), -1)
#
#     #  draw bbox
#     x, y, w, h = bbox_obj[0],bbox_obj[1],bbox_obj[2],  bbox_obj[3]
#     font = cv2.FONT_HERSHEY_SIMPLEX  # set the font
#     bgr_bbox = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
#     imgzi = cv2.putText(bgr_bbox, 'ok', (x, y), font, 1, (0, 255, 255), 4)
#     cv2.imwrite('/home/robolab/6D_ws/raster_triangle/Linemod_preprocessed/renders/ape/{}.jpg'.format(anno_i), imgzi)








##############################################################################################
#######################################   read pkl    ########################################
##############################################################################################
# for i in range(0,5):
#     RT_path = os.path.join(
#         '/home/robolab/6D_ws/GDR-Net/datasets/lm_renders_blender/renders/ape/' + "{}_RT.pkl".format(i))
#     f = open(RT_path,'rb')
#     inf = pickle.load(f)
#     print(inf)

##############################################################################################
#######################################   pkl 2 txt   ########################################
##############################################################################################
# np.set_printoptions(threshold=np.inf)  # print full arrays without truncation
# f = open('/home/robolab/6D_ws/GDR-Net/datasets/lm_imgn/dataset_dicts_lm_imgn_13_train_1k_per_obj_d15c2915bca6e643b351e7feae889fee.pkl','rb')
# inf = pickle.load(f)
# print(inf)
# #f = open('1.pkl', 'wb')  # open in binary write mode; the file is created if missing
# #pickle.dump(inf[0:995], f)  # write to the file
# #f.close()  # close the file
# inf = str(inf)
# ft = open("/home/robolab/6D_ws/GDR-Net/datasets/lm_imgn/a.txt",'w')
# ft.write(inf)


##############################################################################################
#######################################   copy img to other file   ###########################
##############################################################################################
# for i in range(0,10000):
#     # imgpath = os.path.join('/home/robolab/6D_ws/GDR-Net/datasets/lm_renders_blender/renders/ape/' + "{}.jpg".format(i))
#     # newpath = os.path.join('/home/robolab/6D_ws/GDR-Net/datasets/lm_renders_blender/renders/ape_rgb/' + "{}.jpg".format(i))
#     imgpath = os.path.join('/home/robolab/6D_ws/GDR-Net/datasets/lm_renders_blender/renders/ape/' + "{}_depth.png".format(i))
#     newpath = os.path.join('/home/robolab/6D_ws/GDR-Net/datasets/lm_renders_blender/renders/ape_depth/' + "{}_depth.png".format(i))
#     im = Image.open(imgpath)
#     im.save(newpath)








# f = open("/home/sunh/6D_ws/render_code/raster_triangle/Linemod_preprocessed/renders/ape/0.pkl", 'rb')
# inf = pickle.load(f)
# print(inf)
#
# cv2.imshow('s',inf['rgb'].astype("uint8"))
# cv2.waitKey(100000)

# rt = np.load('/media/sunh/Samsung_T5/6D_data/PVNET/from_old/custom/pose/0.npy')
# print(rt)







###############################################################################################################
##################################### generate pose_gt used by rgbd_render.py from icp's pose #################
###############################################################################################################
# pose_gt_all = np.load('/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/cube/pose_gt.npy')
# print('--------------------------------------------------------------------')
# print(pose_gt_all[0][0:3,0:4])
# pose_gt = []
# for i in range(len(pose_gt_all)):
#     pose_gt.append(pose_gt_all[i][0:3,0:4])
# filename = '/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/cube/pose_gt_for_render.npy'
# np.save(filename, pose_gt)
# print("Transforms saved")


###############################################################################################################
################################################### jpg 2 png #################################################
###############################################################################################################
# import os
# for i in range(0,1607):
#     img_depth = cv2.imread("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg/depth/{}.png" .format(i))
#     cv2.imwrite("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_train_3/depth/{}.png" .format(i),img_depth)
#     img_rgb = cv2.imread("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg/JPEGImages/{}.jpg" .format(i))
#     cv2.imwrite("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_train_3/rgb/{}.png" .format(i),img_rgb)
#     img_mask = cv2.imread("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg/mask/{}.png" .format(i))
#     cv2.imwrite("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_train_3/mask/{}.png" .format(i),img_mask)
#     print(i)
#
# for i in range(0,1556):
#     img_depth = cv2.imread("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg2/depth/{}.png" .format(i))
#     cv2.imwrite("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_train_3/depth/{}.png" .format(i+1607),img_depth)
#     img_rgb = cv2.imread("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg2/JPEGImages/{}.jpg" .format(i))
#     cv2.imwrite("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_train_3/rgb/{}.png" .format(i+1607),img_rgb)
#     img_mask = cv2.imread("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg2/mask/{}.png" .format(i))
#     cv2.imwrite("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_train_3/mask/{}.png" .format(i+1607),img_mask)
#     print(i)





###############################################################################################################
################################################### create more real dataset ##################################
###############################################################################################################
# import os
# import cv2
# import tqdm
# import numpy as np
# import json
# import yaml
# from lib.meshrenderer import meshrenderer_phong
#
# ##############################################   config:start  ############################################
# VIS = True
# bbox_VIS = True
# random_light = False
# render_near = 0.1
# render_far = 10000 # unit: should be mm
# K = np.array(  [[621.399658203125, 0, 313.72052001953125],
#               [0, 621.3997802734375, 239.97579956054688],
#               [0, 0, 1]])
# IM_W, IM_H = 640, 480
# ply_model_paths = [str('/home/sunh/6D_ws/init_pose/peg/peg_blender.ply')]
# sence_gt = np.load('/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg/pose_gt.npy')
# sence_gt2 = np.load('/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg2/pose_gt.npy')
# sence_gt4 = np.load('/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg4/pose_gt.npy')
# yamlpath_gt = os.path.join( "/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_train_3/gt.yml")
# yamlpath_info = os.path.join( "/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_train_3/info.yml")
# # render_dir = os.path.join('./custom_dataset' )
# max_rel_offset = 0.2  # used to jitter the abs bbox
# ##############################################   config:end  ############################################
#
# Renderer = meshrenderer_phong.Renderer(ply_model_paths,
#                     samples=1,
#                     vertex_scale=float(1)) # float(1) for some models
#
# # sunhan add : get the bbox from depth
# def calc_2d_bbox(xs, ys, im_size):
#     bbTL = (max(xs.min() - 1, 0),
#             max(ys.min() - 1, 0))
#     bbBR = (min(xs.max() + 1, im_size[0] - 1),
#             min(ys.max() + 1, im_size[1] - 1))
#     return [bbTL[0], bbTL[1], bbBR[0] - bbTL[0], bbBR[1] - bbTL[1]]
#
#
# scene = {}
# scene_info = {}
# for idx in range(len(sence_gt)):
#     image_real = cv2.imread(
#         '/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg/JPEGImages/{}.jpg'.format(idx))
#
#     RT = sence_gt[idx]
#     R = RT[0:3, 0:3]
#     t = RT[0:3, 3]*1000  # unit: should be mm
#     bgr, depth = Renderer.render(
#         obj_id=0,
#         W=IM_W,
#         H=IM_H,
#         K=K.copy(),
#         R=R,
#         t=t,
#         near=render_near,
#         far=render_far,
#         random_light=random_light
#     )
#     mask = (depth > 1e-8).astype('uint8')
#     show_msk = (mask / mask.max() * 255).astype("uint8")
#
#     if VIS:
#         ## real image from realsense
#         ## when the windows pop up, press 'Esc' to continue and check the visible pose
#         g_y = np.zeros_like(bgr)
#         g_y[:, :, 1] = bgr[:, :, 1]
#         im_bg = cv2.bitwise_and(image_real, image_real, mask=(g_y[:, :, 1] == 0).astype(np.uint8))
#         image_show = cv2.addWeighted(im_bg, 1, g_y, 1, 0)
#
#         cv2.imshow('render_real_align', image_show)
#         cv2.imshow('render', bgr)
#         cv2.imshow('real_image', image_real)
#         cv2.imshow('mask', show_msk)
#         cv2.waitKey()
#
#     # save mask image
#     print('------------------ save image id:  {}  -----------------'.format(idx))
#     cv2.imwrite('/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg/mask/{}.png'.format(idx),show_msk.astype("uint8"))
#
#     # get abs bbox:  x,y,w,h
#     ys, xs = np.nonzero(depth > 0)
#     render_dims = [IM_W, IM_H]
#     try:
#         obj_bb = calc_2d_bbox(xs, ys, render_dims)
#     except ValueError as e:
#         print('Object in Rendering not visible. Have you scaled the vertices to mm?')
#         break
#     x, y, w, h = obj_bb
#
#     ## enlarge the bbox (optionally with a random offset)
#     # rand_trans_x = np.random.uniform(-max_rel_offset, max_rel_offset) * w
#     # rand_trans_y = np.random.uniform(-max_rel_offset, max_rel_offset) * h
#     # obj_bb_off = obj_bb + np.array([rand_trans_x, rand_trans_y, 0, 0])
#     obj_bb_off = obj_bb + np.array([-8, -8, 16, 16])
#
#     if bbox_VIS:
#         # draw bbox
#         obj_bb =  obj_bb_off.copy()
#         x, y, w, h = int(obj_bb[0]), int(obj_bb[1]), int(obj_bb[2]),int(obj_bb[3])
#         font = cv2.FONT_HERSHEY_SIMPLEX  # set the font
#         bgr_bbox = cv2.rectangle(image_real, (x, y), (x + w, y + h), (0, 255, 0), 2)
#         imgzi = cv2.putText(bgr_bbox, 'ok', (x, y), font, 1, (0, 255, 255), 4)
#         cv2.imshow('show bbox',imgzi)
#         cv2.waitKey()
#
#     R = RT[0:3, 0:3].flatten()
#     t = RT[0:3, 3]
#     R = [float(R[0]), float(R[1]), float(R[2]), float(R[3]), float(R[4]),float(R[5]),float(R[6]), float(R[7]), float(R[8])]
#     t = [float(t[0]) * 1000, float(t[1]) * 1000, float(t[2]) * 1000]
#
#     # bbox_render_real
#     data = {}
#     obj_bb = [ int(x), int(y), int(w), int(h) ]
#     data["cam_R_m2c"] = R
#     data["cam_t_m2c"] = t
#     data["obj_bb"] = obj_bb
#     data["obj_id"] = 0
#     scene[idx] = [data]
#
#
#     data_info = {}
#     data_info["cam_K"] = [float(K[0][0]), float(K[0][1]), float(K[0][2]),
#                           float(K[1][0]), float(K[1][1]), float(K[1][2]),
#                           float(K[2][0]),float(K[2][1]),float(K[2][2])]
#     data_info["depth_scale"] = 0.00012498664727900177
#     scene_info[idx] = data_info
#
#
# for idx in range(len(sence_gt2)):
#     image_real = cv2.imread(
#         '/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg2/JPEGImages/{}.jpg'.format(idx))
#
#     RT = sence_gt2[idx]
#     R = RT[0:3, 0:3]
#     t = RT[0:3, 3]*1000  # unit: should be mm
#     bgr, depth = Renderer.render(
#         obj_id=0,
#         W=IM_W,
#         H=IM_H,
#         K=K.copy(),
#         R=R,
#         t=t,
#         near=render_near,
#         far=render_far,
#         random_light=random_light
#     )
#     mask = (depth > 1e-8).astype('uint8')
#     show_msk = (mask / mask.max() * 255).astype("uint8")
#
#     if VIS:
#         ## real image from realsense
#         ## when the windows pop up, press 'Esc' to continue and check the visible pose
#         g_y = np.zeros_like(bgr)
#         g_y[:, :, 1] = bgr[:, :, 1]
#         im_bg = cv2.bitwise_and(image_real, image_real, mask=(g_y[:, :, 1] == 0).astype(np.uint8))
#         image_show = cv2.addWeighted(im_bg, 1, g_y, 1, 0)
#
#         cv2.imshow('render_real_align', image_show)
#         cv2.imshow('render', bgr)
#         cv2.imshow('real_image', image_real)
#         cv2.imshow('mask', show_msk)
#         cv2.waitKey()
#
#     # save mask image
#     print('------------------ save image id:  {}  -----------------'.format(idx))
#     cv2.imwrite('/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg2/mask/{}.png'.format(idx),show_msk.astype("uint8"))
#
#     # get abs bbox:  x,y,w,h
#     ys, xs = np.nonzero(depth > 0)
#     render_dims = [IM_W, IM_H]
#     try:
#         obj_bb = calc_2d_bbox(xs, ys, render_dims)
#     except ValueError as e:
#         print('Object in Rendering not visible. Have you scaled the vertices to mm?')
#         break
#     x, y, w, h = obj_bb
#
#     ## enlarge the bbox (optionally with a random offset)
#     # rand_trans_x = np.random.uniform(-max_rel_offset, max_rel_offset) * w
#     # rand_trans_y = np.random.uniform(-max_rel_offset, max_rel_offset) * h
#     # obj_bb_off = obj_bb + np.array([rand_trans_x, rand_trans_y, 0, 0])
#     obj_bb_off = obj_bb + np.array([-8, -8, 16, 16])
#
#     if bbox_VIS:
#         # draw bbox
#         obj_bb =  obj_bb_off.copy()
#         x, y, w, h = int(obj_bb[0]), int(obj_bb[1]), int(obj_bb[2]),int(obj_bb[3])
#         font = cv2.FONT_HERSHEY_SIMPLEX  # set the font
#         bgr_bbox = cv2.rectangle(image_real, (x, y), (x + w, y + h), (0, 255, 0), 2)
#         imgzi = cv2.putText(bgr_bbox, 'ok', (x, y), font, 1, (0, 255, 255), 4)
#         cv2.imshow('show bbox',imgzi)
#         cv2.waitKey()
#
#     R = RT[0:3, 0:3].flatten()
#     t = RT[0:3, 3]
#     R = [float(R[0]), float(R[1]), float(R[2]), float(R[3]), float(R[4]),float(R[5]),float(R[6]), float(R[7]), float(R[8])]
#     t = [float(t[0]) * 1000, float(t[1]) * 1000, float(t[2]) * 1000]
#
#     # bbox_render_real
#     data = {}
#     obj_bb = [ int(x), int(y), int(w), int(h) ]
#     data["cam_R_m2c"] = R
#     data["cam_t_m2c"] = t
#     data["obj_bb"] = obj_bb
#     data["obj_id"] = 0
#     idx2 = idx + 1607
#     scene[idx2] = [data]
#
#
#     data_info = {}
#     data_info["cam_K"] = [float(K[0][0]), float(K[0][1]), float(K[0][2]),
#                           float(K[1][0]), float(K[1][1]), float(K[1][2]),
#                           float(K[2][0]),float(K[2][1]),float(K[2][2])]
#     data_info["depth_scale"] = 0.00012498664727900177
#     scene_info[idx2] = data_info
#
#
# # for idx in range(len(sence_gt4)):
# #     image_real = cv2.imread(
# #         '/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg4/JPEGImages/{}.jpg'.format(idx))
# #
# #     RT = sence_gt4[idx]
# #     R = RT[0:3, 0:3]
# #     t = RT[0:3, 3]*1000  # unit: should be mm
# #     bgr, depth = Renderer.render(
# #         obj_id=0,
# #         W=IM_W,
# #         H=IM_H,
# #         K=K.copy(),
# #         R=R,
# #         t=t,
# #         near=render_near,
# #         far=render_far,
# #         random_light=random_light
# #     )
# #     mask = (depth > 1e-8).astype('uint8')
# #     show_msk = (mask / mask.max() * 255).astype("uint8")
# #
# #     if VIS:
# #         ## real image from realsense
# #         ## when the windows pop up, press 'Esc' to continue and check the visible pose
# #         g_y = np.zeros_like(bgr)
# #         g_y[:, :, 1] = bgr[:, :, 1]
# #         im_bg = cv2.bitwise_and(image_real, image_real, mask=(g_y[:, :, 1] == 0).astype(np.uint8))
# #         image_show = cv2.addWeighted(im_bg, 1, g_y, 1, 0)
# #
# #         cv2.imshow('render_real_align', image_show)
# #         cv2.imshow('render', bgr)
# #         cv2.imshow('real_image', image_real)
# #         cv2.imshow('mask', show_msk)
# #         cv2.waitKey()
# #
# #     # save mask image
# #     print('------------------ save image id:  {}  -----------------'.format(idx))
# #     cv2.imwrite('/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg4/mask/{}.png'.format(idx),show_msk.astype("uint8"))
# #
# #     # get abs bbox:  x,y,w,h
# #     ys, xs = np.nonzero(depth > 0)
# #     render_dims = [IM_W, IM_H]
# #     try:
# #         obj_bb = calc_2d_bbox(xs, ys, render_dims)
# #     except ValueError as e:
# #         print('Object in Rendering not visible. Have you scaled the vertices to mm?')
# #         break
# #     x, y, w, h = obj_bb
# #
# #     ## enlarge the bbox (optionally with a random offset)
# #     # rand_trans_x = np.random.uniform(-max_rel_offset, max_rel_offset) * w
# #     # rand_trans_y = np.random.uniform(-max_rel_offset, max_rel_offset) * h
# #     # obj_bb_off = obj_bb + np.array([rand_trans_x, rand_trans_y, 0, 0])
# #     obj_bb_off = obj_bb + np.array([-8, -8, 16, 16])
# #
# #     if bbox_VIS:
# #         # draw bbox
# #         obj_bb =  obj_bb_off.copy()
# #         x, y, w, h = int(obj_bb[0]), int(obj_bb[1]), int(obj_bb[2]),int(obj_bb[3])
# #         font = cv2.FONT_HERSHEY_SIMPLEX  # set the font
# #         bgr_bbox = cv2.rectangle(image_real, (x, y), (x + w, y + h), (0, 255, 0), 2)
# #         imgzi = cv2.putText(bgr_bbox, 'ok', (x, y), font, 1, (0, 255, 255), 4)
# #         cv2.imshow('show bbox',imgzi)
# #         cv2.waitKey()
# #
# #     R = RT[0:3, 0:3].flatten()
# #     t = RT[0:3, 3]
# #     R = [float(R[0]), float(R[1]), float(R[2]), float(R[3]), float(R[4]),float(R[5]),float(R[6]), float(R[7]), float(R[8])]
# #     t = [float(t[0]) * 1000, float(t[1]) * 1000, float(t[2]) * 1000]
# #
# #     # bbox_render_real
# #     data = {}
# #     obj_bb = [ int(x), int(y), int(w), int(h) ]
# #     data["cam_R_m2c"] = R
# #     data["cam_t_m2c"] = t
# #     data["obj_bb"] = obj_bb
# #     data["obj_id"] = 0
# #     idx4 = idx + len(sence_gt) + len(sence_gt2)  # offset past the first two splits
# #     scene[idx4] = [data]
# #
# #
# #     data_info = {}
# #     data_info["cam_K"] = [float(K[0][0]), float(K[0][1]), float(K[0][2]),
# #                           float(K[1][0]), float(K[1][1]), float(K[1][2]),
# #                           float(K[2][0]),float(K[2][1]),float(K[2][2])]
# #     data_info["depth_scale"] = 0.00012498664727900177
# #     scene_info[idx4] = data_info
#
#
# with open(yamlpath_gt, 'w') as f:
#     yaml.dump(scene, f)
#
# with open(yamlpath_info, 'w') as f:
#     yaml.dump(scene_info, f)




# import os
# for i in range(0,608):
#     img_depth = cv2.imread("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_test/JPEGImages/{}.jpg" .format(i))
#     cv2.imwrite("/home/sunh/6D_ws/render_code/ObjectDatasetTools-master/LINEMOD/peg_test/rgb/{}.png" .format(i),img_depth)
#
#     print(i)










########################################################################################################################
#####################################################  bbox from keras_retinanet  add realsense ########################
########################################################################################################################
# import keras
# # import keras_retinanet
# from keras_retinanet import models
# from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
# from keras_retinanet.utils.visualization import draw_box, draw_caption
# from keras_retinanet.utils.colors import label_color
# # import miscellaneous modules
# import matplotlib.pyplot as plt
# import cv2
# import os
# import numpy as np
# import time
# # set tf backend to allow memory to grow, instead of claiming everything
# import tensorflow as tf
# import pyrealsense2 as rs
#
#
# def get_session():
#     config = tf.ConfigProto()
#     config.gpu_options.allow_growth = True
#     return tf.Session(config=config)
#
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# keras.backend.tensorflow_backend.set_session(get_session())
# model_path = "/media/sunh/Samsung_T5/6D_data/retinanet/snapshots/resnet50_csv_24.h5"
# model_retinnet = models.load_model(model_path, backbone_name='resnet50')
# model_retinnet = models.convert_model(model_retinnet)
#
# # realsense camera
# pipeline = rs.pipeline()
# config = rs.config()
# config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# profile = pipeline.start(config)
# align_to = rs.stream.color
# align = rs.align(align_to)
#
# while True:
#     #realsense camera get color image
#     frames = pipeline.wait_for_frames()
#     aligned_frames = align.process(frames)
#
#     aligned_depth_frame = aligned_frames.get_depth_frame()
#     color_frame = aligned_frames.get_color_frame()
#
#     rs_image = np.asanyarray(color_frame.get_data())
#     cv2.imwrite('/home/sunh/6D_ws/other_code/GDR-Net-main/datasets/a.jpg', rs_image)
#
#     image = read_image_bgr('/home/sunh/6D_ws/other_code/GDR-Net-main/datasets/a.jpg')
#     H, W = image.shape[:2]
#     # keras_retinanet
#     draw = image.copy()
#     draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
#     image = preprocess_image(image)
#     image, scale = resize_image(image)
#     boxes, scores, labels = model_retinnet.predict_on_batch(np.expand_dims(image, axis=0))
#
#     valid_dets = np.where(scores[0] >= 0.95)
#     boxes /= scale
#     scores = scores[0][valid_dets]
#     boxes = boxes[0][valid_dets]
#     labels = labels[0][valid_dets]
#     filtered_boxes = []
#     filtered_scores = []
#     filtered_labels = []
#     print('--------------------------------------------')
#
#     for box, score, label in zip(boxes, scores, labels):
#         box[0] = np.minimum(np.maximum(box[0], 0), W)
#         box[1] = np.minimum(np.maximum(box[1], 0), H)
#         box[2] = np.minimum(np.maximum(box[2], 0), W)
#         box[3] = np.minimum(np.maximum(box[3], 0), H)
#         bb_xywh = np.array([box[0], box[1], box[2] - box[0], box[3] - box[1]])
#         if bb_xywh[2] < 0 or bb_xywh[3] < 0:
#             continue
#         filtered_boxes.append(bb_xywh)
#         filtered_scores.append(score)
#         filtered_labels.append(label)
#
#     color_dict = [(0, 255, 0), (0, 0, 255), (255, 0, 0), (255, 255, 0)] * 10
#     rs_image = cv2.imread('/home/sunh/6D_ws/other_code/GDR-Net-main/datasets/a.jpg')
#     for label, box, score in zip(filtered_labels, filtered_boxes, filtered_scores):
#         box = box.astype(np.int32)
#         xmin, ymin, xmax, ymax = box[0], box[1], box[0] + box[2], box[1] + box[3]
#         # print label
#         cv2.putText(rs_image, '%s : %1.3f' % (label, score), (xmin, ymax + 20), cv2.FONT_ITALIC, .5,
#                     color_dict[int(label)], 2)
#         cv2.rectangle(rs_image, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
#
#
#     cv2.imshow('sssssssss', rs_image)
#     cv2.waitKey()
#
#
#
#
#

    # bbox_all = []
    # score_all = []
    # label_all = []
    # for box, score, label in zip(boxes[0], scores[0], labels[0]):
    #     if score < 0.95:
    #         break
    #     color = label_color(label)
    #     box = box.astype(int)
    #     bbox_all.append(box)
    #     score_all.append(score)
    #     label_all.append(label)
    #
    # print('bbox_all: ',bbox_all)
    # print('score_all: ',score_all)
    # print('label_all: ',label_all)
    #
    # x = bbox_all[0][0]
    # y = bbox_all[0][1]
    # w = bbox_all[0][2]-bbox_all[0][0]
    # h = bbox_all[0][3]-bbox_all[0][1]
    # print('------------------------------------------------boxes: ',    x, y, x+w, y+h)
    #



# ############################################################## look the bbox  ###########################
# import os.path as osp
# import mmcv
# scene_root = osp.join( "/home/sunh/6D_ws/other_code/GDR-Net-main/datasets/BOP_DATASETS/lm/test/000000/scene_gt_info.json")
# assert osp.exists(scene_root), scene_root
# gt_dict = mmcv.load(scene_root)  # key is str(obj_id)
#
# for i in range(0,100):
#     data_idx = gt_dict['{}'.format(i)]
#     print(data_idx[0]['bbox_obj'])
#
# for anno_i, anno in enumerate(gt_dict['0']):
#     bbox_obj = anno["bbox_obj"]
#     print(bbox_obj)
#     img = cv2.imread('/home/sunh/6D_ws/other_code/GDR-Net-main/datasets/BOP_DATASETS/lm/test/000000/rgb/0.jpg',1)
#     cv2.circle(img, (100, 100), 4, (0, 0, 255), -1)
#     #  draw bbox
#     x, y, w, h = bbox_obj[0],bbox_obj[1],bbox_obj[2],  bbox_obj[3]
#     font = cv2.FONT_HERSHEY_SIMPLEX  # set the font
#     bgr_bbox = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
#     imgzi = cv2.putText(bgr_bbox, 'ok', (x, y), font, 1, (0, 255, 255), 4)
#     cv2.imshow('s',imgzi)
#     cv2.waitKey()


# ##########################################  crop image ####################################################
# img = cv2.imread('/home/sunh/6D_ws/other_code/GDR-Net-main/datasets/BOP_DATASETS/lm/test/000000/rgb/0.jpg')
# x, y, w, h = 190, 115, 75, 86
# img = img[y: y+h,x:x+w]
# cv2.imshow('s', img)
# cv2.waitKey()

############################################   my render #####################################################
# from meshrenderer import meshrenderer_phong
#
# K = np.array([[621.399658203125, 0, 313.72052001953125], [0, 621.3997802734375, 239.97579956054688], [0, 0, 1]])
# ply_model_paths = [str('/home/sunh/6D_ws/init_pose/cube/cube_blender.ply')]
# renderer = meshrenderer_phong.Renderer(ply_model_paths,
#                     samples=1,
#                     vertex_scale=float(1)) # float(1) for some models
# R = np.array([[ 0.98854389 , -0.08867985, -0.12213457],
#  [0.00746701 , 0.83693685   ,0.54724853],
#  [ -0.15074884, -0.54006722  , 0.82801098]],
# )
#
# t = np.array([-0.07447373*1000, -0.06773148 *1000, 0.55685109*1000])
# # t = np.array([0, 0,700])
# bgr, depth = renderer.render(
#     obj_id=0,
#     W=640,
#     H=480,
#     K=K.copy(),
#     R=R,
#     t=t,
#     near=0.1,
#     far=3000,
#     random_light=False
# )
#
# bgr = cv2.resize(bgr, (640,480))
# image =  cv2.imread('/home/sunh/6D_ws/other_code/GDR-Net-main/datasets/BOP_DATASETS/lm/test/000000/rgb/0.jpg')
#
# g_y = np.zeros_like(bgr)
# g_y[:, :, 1] = bgr[:, :, 1]
# im_bg = cv2.bitwise_and(image, image, mask=(g_y[:, :, 1] == 0).astype(np.uint8))
# image_show = cv2.addWeighted(im_bg, 1, g_y, 1, 0)
#
#
# # cv2.imshow('', bgr)
# cv2.imshow('real', image_show)
# cv2.imshow('image', image)
# cv2.waitKey()




###################################### open pkl ##########################################
# RT_path = os.path.join('/home/sunh/6D_ws/render_code/raster_triangle/sampled_poses/ape_sampled_RTs.pkl')
# f = open(RT_path,'rb')
# inf = pickle.load(f)
# print(inf[0])



###############################################################################################################
######################################     generate model info json(diameter is a problem)      ###############
###############################################################################################################
# from plyfile import PlyData
# model_path = os.path.join('/home/sunh/6D_ws/init_pose/peg/peg_blender.ply')
# ply = PlyData.read(model_path)
# data = ply.elements[0].data
# x = data['x']
# y = data['y']
# z = data['z']
# print(np.min(x))
# print(np.max(x)-np.min(x))
# print('-----------------------------------------------------------------')
# print(np.min(y))
# print(np.max(y)-np.min(y))
# print('-----------------------------------------------------------------')
# print(np.min(z))
# print(np.max(z)-np.min(z))






###############################################################################  HOG + SVM ##########################
# import cv2 as cv
# import numpy as np
# import os
# import glob
# from skimage.feature import hog
#
# samples = [] #for HOGs
# labels = []  #for classes
#
#
# for i in range(1,11):
# 	filename = os.path.join('/home/sunh/6D_ws/home/1/'+'{}.jpg'.format(i))
# 	print('filenamefilename',filename)
# 	img = cv.imread(filename, 1)
# 	img=cv.resize(img, (500, 500))
# 	hist = hog(img,multichannel=True)
# 	samples.append(hist)
#
# samples = np.float32(samples)
#
# labels = np.array([0,0,0,0,0,1,1,1,1,1])
#
#
# rand = np.random.RandomState(421)
# shuffle = rand.permutation(len(samples))
# samples = samples[shuffle]
# labels = labels[shuffle]
#
# svm = cv.ml.SVM_create()
# svm.setType(cv.ml.SVM_C_SVC)
# svm.setKernel(cv.ml.SVM_RBF)
#
# svm.setGamma(1)  # default = 1
# svm.setC(0.01)  # default = 1
#
# print('samplessamples',samples)
# svm.train(samples, cv.ml.ROW_SAMPLE, labels)
# svm.save('/home/sunh/6D_ws/home/1.dat')


# import numpy as np
# import os
# import glob
# from skimage.feature import hog
# from skimage import io
# import time
# from xml.dom.minidom import parse
#
#
# start  = time.time()
#
#
# image = cv2.imread("/home/sunh/6D_ws/home/mmexport1629907910136.jpg")  # read the test image
#
#
#
# svm = cv2.ml.SVM_load('/home/sunh/6D_ws/home/1.dat')
# image = cv2.resize(image, (500, 500))
# hist = hog(image,multichannel=True)
# sample = np.float32(hist)
# resp = svm.predict(sample.reshape(1, -1))
#
# end  = time.time()
#
# print('cost time ',end-start)
# print(resp[1].ravel()[0])



#
# from lib.meshrenderer import meshrenderer_phong
#
# K = np.array([[621.399658203125, 0, 313.72052001953125], [0, 621.3997802734375, 239.97579956054688], [0, 0, 1]])
# ply_model_paths = [str('/home/sunh/6D_ws/other_code/EfficientPose/Linemod_preprocessed/models/obj_00.ply')]
# renderer = meshrenderer_phong.Renderer(ply_model_paths,
#                     samples=1,
#                     vertex_scale=float(1)) # float(1) for some models
#
#
# t = np.array([6.1600491e+01 ,4.6787190e-01, 5.6013763e+02])
# # t = np.array([0, 0,700])
# bgr, depth = renderer.render(
#     obj_id=0,
#     W=640,
#     H=480,
#     K=K.copy(),
#     R=R,
#     t=t,
#     near=0.1,
#     far=3000,
#     random_light=False
# )
# cv2.imshow('s', bgr)
# cv2.waitKey()


# ###################################################    #############################################################
import pyrealsense2 as rs
import open3d as o3d
import cv2
import numpy as np
import math

# theta= np.array([2.0966845  , 0.33196357, -0.45598263])
#
# R_x = np.array([[1, 0, 0],
#                 [0, math.cos(theta[0]), -math.sin(theta[0])],
#                 [0, math.sin(theta[0]), math.cos(theta[0])]
#                 ])
#
# R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
#                 [0, 1, 0],
#                 [-math.sin(theta[1]), 0, math.cos(theta[1])]
#                 ])
#
# R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
#                 [math.sin(theta[2]), math.cos(theta[2]), 0],
#                 [0, 0, 1]
#                 ])
# R = np.dot(R_z, np.dot(R_y, R_x))
#
# t = np.array([-16.04237 ,-95.99062 ,585.7625 ])*0.001
# T= np.identity(4)
# T[0:3,0:3] = R
# T[0:3,3] = t
# source = o3d.io.read_point_cloud('/home/sunh/6D_ws/my_dataset/mesh/omron.ply')
# target = o3d.io.read_point_cloud('/home/sunh/6D_ws/other_code/EfficientPose/result/depth.ply')
# mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1, origin=[0, 0, 0])
# source.transform(T)
# o3d.visualization.draw_geometries([source,target,mesh_frame])
#
# voxel_radius = [0.014, 0.01, 0.005, 0.001]
# max_iter = [50, 30, 14, 150]
#
# print("3. point cloud registration")
# for scale in range(4):
#     iter = max_iter[scale]
#     radius = voxel_radius[scale]
#     print([iter, radius, scale])
#
#     print("3-1. Downsample with a voxel size %.2f" % radius)
#
#     source_down = source.voxel_down_sample(radius)
#     target_down = target.voxel_down_sample(radius)
#
#     print("3-2. Estimate normal.")
#     source_down.estimate_normals(
#         o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))
#     target_down.estimate_normals(
#         o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))
#
#
#     print("3-3. Applying  point cloud registration")
#
#     result_icp = o3d.pipelines.registration.registration_icp(
#         source_down, target_down, 0.02, T,
#         o3d.pipelines.registration.TransformationEstimationPointToPoint(),
#         o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=2000))
#
#     T = result_icp.transformation
#     source.transform(T)
#     o3d.visualization.draw_geometries([source, target, mesh_frame])



#
# pipeline = rs.pipeline()
# config = rs.config()
# config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# profile = pipeline.start(config)
# 
# 
# depth_sensor = profile.get_device().first_depth_sensor()
# depth_sensor.set_option(rs.option.visual_preset, 3)
# depth_scale = depth_sensor.get_depth_scale()
# clipping_distance_in_meters = 8  # 8 meter
# clipping_distance = clipping_distance_in_meters / depth_scale
# 
# align_to = rs.stream.color
# align = rs.align(align_to)
# 
# print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<   Start Detection >>>>>>>>>>>>>>>>>>>>>>>>>>>>')
# while True:
#     frames = pipeline.wait_for_frames()
#     aligned_frames = align.process(frames)
#     aligned_depth_frame = aligned_frames.get_depth_frame()
#     color_frame = aligned_frames.get_color_frame()
# 
#     depth_image = np.asanyarray(aligned_depth_frame.get_data())
#     color_image = np.asanyarray(color_frame.get_data())
#     intrinsics = color_frame.profile.as_video_stream_profile().intrinsics
#     o3d_inter = o3d.camera.PinholeCameraIntrinsic(intrinsics.width, intrinsics.height, intrinsics.fx, intrinsics.fy,
#                                                   intrinsics.ppx, intrinsics.ppy)
# 
#     # cv2.imwrite('/home/sunh/6D_ws/other_code/EfficientPose/result/depth.png',depth_image)
# 
#     # depth = o3d.io.read_image('/home/sunh/6D_ws/other_code/EfficientPose/result/depth.png')
#     depth_bg = np.zeros((480, 640), dtype=np.uint16)
#     cv2.rectangle(depth_bg, (217, 234),
#                   (303, 293), (1, 1, 1), -1)
# 
#     cv2.rectangle(color_image, (217, 234),
#                   (303, 293), (1, 1, 1), -1)
#     cv2.imshow('s',color_image)
#     cv2.waitKey()
#     depth_bg = cv2.split(depth_bg)[0]
#     depth_new = depth_bg * depth_image
#     color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
#     rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
#         o3d.geometry.Image(color_image.copy()),
#         o3d.geometry.Image(depth_new),
#         depth_scale=1.0 / depth_scale,
#         depth_trunc=clipping_distance_in_meters,
#         convert_rgb_to_intensity=False)
#     pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, o3d_inter)
#     o3d.visualization.draw_geometries([pcd])



# mask = cv2.imread('/home/sunh/6D_ws/other_code/EfficientPose/tools/pybullet_dataset/mask.png')
# mask  = cv2.imread('/home/sunh/6D_ws/other_code/EfficientPose/tools/pybullet_dataset/depth.png',cv2.IMREAD_ANYDEPTH)
#
# # mask = cv2.split(mask)[0]
# mask[mask <= 1] = 0
# mask[mask > 0] = 255
# cv2.imshow('s',mask)
# cv2.imwrite('/home/sunh/6D_ws/other_code/EfficientPose/tools/pybullet_dataset/mask2.png',mask)
# cv2.waitKey()



# color_raw = o3d.io.read_image("/home/sunh/6D_ws/other_code/EfficientPose/tools/pybullet_dataset/rgb.png")
# depth_raw = o3d.io.read_image("/home/sunh/catkin_ws/src/hangdian_python/hangdian/depth_new.png")
# rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(color_raw, depth_raw)
# width, height = 640, 480
# intrinsic = o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
# target = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic)


V. Open3D-related

import cv2
import open3d as o3d

# cx = 950.267630
# cy = 538.395644
# fx = 1380.215613
# fy = 1380.945531
# cv_img = cv2.imread(
#     "/home/sunh/catkin_ws/src/hangdian_python/hangdian/colordata/color_0.png")  # note: yolov5 does not predict on the image read here, but on 'data/hangdianimage' above
# cv_depth = cv2.imread("/home/sunh/catkin_ws/src/hangdian_python/hangdian/dep_1.png")
#
# color_raw = o3d.geometry.Image(cv_img)
# # color_raw = cv_img
# depth_raw = o3d.geometry.Image(cv_depth)
# rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(color_raw, depth_raw)
# width, height = 1920, 1020
# intrinsic = o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
# target = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic)
# o3d.visualization.draw_geometries([target])

import pyrealsense2 as rs
import json
import numpy as np



# pipeline = rs.pipeline()
# config = rs.config()
# # Start pipeline
# profile = pipeline.start(config)
# sensor = pipeline.get_active_profile().get_device().query_sensors()[0]
# sensor.set_option(rs.option.motion_range,100)
# pipeline.stop()

# frames = pipeline.wait_for_frames()
# color_frame = frames.get_color_frame()
#
# # Color Intrinsics
# intr = color_frame.profile.as_video_stream_profile().intrinsics
# camera_parameters = {'fx': intr.fx, 'fy': intr.fy,
#                      'ppx': intr.ppx, 'ppy': intr.ppy,
#                      'height': intr.height, 'width': intr.width,
#                      'depth_scale': profile.get_device().first_depth_sensor().get_depth_scale()
#                      }
# align_to = rs.stream.color
# align = rs.align(align_to)
# while True:
#     frames = pipeline.wait_for_frames()
#     aligned_frames = align.process(frames)
#
#     aligned_depth_frame = aligned_frames.get_depth_frame()
#     color_frame = aligned_frames.get_color_frame()
#
#     d = np.asanyarray(aligned_depth_frame.get_data())
#     c = np.asanyarray(color_frame.get_data())
#
#     cv2.imshow('depth',d)
#     cv2.imshow('color',c)
#     cv2.waitKey()
#
# with open('/home/sunh/catkin_ws/src/hangdian_python/' + 'intrinsics.json', 'w') as fp:
#     json.dump(camera_parameters, fp)

# from xml.dom.minidom import parse
# import os
#
#
#
#
# filepath = os.path.join('/home/sunh/catkin_ws/src/hangdian/svm_detect/excel/{}.xml'.format(12))  # the panel
#
# domTree = parse(filepath)
# rootNode = domTree.documentElement
# size = rootNode.getElementsByTagName("size")
# width = size[0].getElementsByTagName("width")[0].childNodes[0].data
#
# print(width)
#
# objects = rootNode.getElementsByTagName("object")  # assumed: 'objects' was undefined in the original
# output = []
# for i in range(len(objects)):
#     if objects[i].getElementsByTagName("name")[0].childNodes[0].data == target:
#         xmin = float(objects[i].getElementsByTagName("xmin")[0].childNodes[0].data)
#         ymin = float(objects[i].getElementsByTagName("ymin")[0].childNodes[0].data)
#         xmax = float(objects[i].getElementsByTagName("xmax")[0].childNodes[0].data)
#         ymax = float(objects[i].getElementsByTagName("ymax")[0].childNodes[0].data)
#         output.append([xmin, ymin, xmax, ymax])

############################################################  realsense  #############################################
import pyrealsense2 as rs
import numpy as np
import cv2
import time
import datetime
import threading
from IPython import embed

# check whether the cameras are connected
# connect_device = []
# for d in rs.context().devices:
#     if d.get_info(rs.camera_info.name).lower() != 'platform camera':
#         connect_device.append(d.get_info(rs.camera_info.serial_number))
#
# if len(connect_device) < 2:
#     print('Registration needs two cameras connected, but got only one.')
#     exit()
#
# pipeline1 = rs.pipeline()
# config = rs.config()
# config.enable_device(connect_device[0])
# print(connect_device[0])
# profile = pipeline1.start(config)
# sensor = pipeline1.get_active_profile().get_device().query_sensors()[0]
# sensor.set_option(rs.option.motion_range,100)
# pipeline1.stop()
#
# pipeline2 = rs.pipeline()
# config = rs.config()
# config.enable_device(connect_device[1])
# print(connect_device[1])
# profile = pipeline2.start(config)
# sensor = pipeline2.get_active_profile().get_device().query_sensors()[0]
# sensor.set_option(rs.option.motion_range,100)
# pipeline2.stop()
###########################################################################
# pipeline = rs.pipeline()
# config = rs.config()
# # Strt pipeline
# profile = pipeline.start(config)
# sensor = pipeline.get_active_profile().get_device().query_sensors()[0]  # device 637202001815; get_device() takes no serial, select it via config.enable_device() instead
# sensor.set_option(rs.option.motion_range,100)
# pipeline.stop()
############################################################  realsense  #############################################


###########################################  line detect ############################
# import cv2
# import numpy as np
#
# def line_detect(image):
#     # convert the image to HSV
#     hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
#
#     # threshold for white (low saturation, high value)
#     lowera = np.array([0, 0, 221])
#     uppera = np.array([180, 30, 255])
#     mask1 = cv2.inRange(hsv, lowera, uppera)
#     kernel = np.ones((3, 3), np.uint8)
#
#     # morphological operations on the mask (closing, then opening)
#     mask = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, kernel)  # closing: dilation followed by erosion
#     mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)    # opening: erosion followed by dilation
#     cv2.imshow("mask",mask)
#     cv2.waitKey()
#
#     # extract edges
#     edges = cv2.Canny(mask, 50, 150, apertureSize=3)
#     # show the edge image
#     cv2.imshow("edges", edges)
#     cv2.waitKey()
#
#     # detect the white lines; these arguments are the detection conditions -- read up on HoughLinesP() and tune them for your own data
#     lines = cv2.HoughLinesP(edges, 1, np.pi / 360, 10, minLineLength=10, maxLineGap=50)
#
#     print( "lines=",lines)
#     print("========================================================")
#     i = 1
#     # iterate over the lines returned by the Hough transform
#     for line in lines:
#         # newlines1 = lines[:, 0, :]
#         print("line[" + str(i - 1) + "]=", line)
#         x1, y1, x2, y2 = line[0]   # two points determine a line: (x1, y1) and (x2, y2)
#         cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)     # draw the line on the original image
#         # convert to float to compute the slope
#         x1 = float(x1)
#         x2 = float(x2)
#         y1 = float(y1)
#         y2 = float(y2)
#         print("x1=%s,x2=%s,y1=%s,y2=%s" % (x1, x2, y1, y2))
#         if x2 - x1 == 0:
#             print("the line is vertical")
#             result = 90
#         elif y2 - y1 == 0:
#             print("the line is horizontal")
#             result = 0
#         else:
#             # compute the slope
#             k = -(y2 - y1) / (x2 - x1)
#             # arctangent, converted from radians to degrees
#             result = np.arctan(k) * 57.29577
#             print("line inclination angle: " + str(result) + " degrees")
#         i = i + 1
#     # show the final annotated image
#     cv2.imshow("line_detect", image)
#     return result
#
# if __name__ == '__main__':
#     # read the input image
#     src = cv2.imread("/home/sunh/Desktop/hangdain_ourpanel/switch/4/boduan/00.png")
#     # set the window size
#     cv2.namedWindow("input image", cv2.WINDOW_AUTOSIZE)
#     # show the original image
#     cv2.imshow("input image", src)
#     # run the detector
#     line_detect(src)
#     cv2.waitKey(0)

##########################################################  copy png and xml #############################################
# import shutil
# number = 68
# panel = cv2.imread('/home/sunh/Desktop/hangdain_ourpanel/{}.png'.format(number))
# panel_xml = '/home/sunh/Desktop/hangdain_ourpanel/{}.xml'.format(number)  # source image and xml to duplicate
#
# for i in range(420,450):
#     cv2.imwrite('/home/sunh/Desktop/hangdain_ourpanel/data/{}.png'.format(i),panel)
#
#     tar_xml = '/home/sunh/Desktop/hangdain_ourpanel/data/{}.xml'.format(i)
#     shutil.copy(panel_xml, tar_xml)
#

##########################################################  numpy 2 open3d #############################################

import numpy as np
import open3d as o3d
import time

#
# cx = 950.267630
# cy = 538.395644
# fx = 1380.215613
# fy = 1380.945531
#
# color = cv2.imread("/home/sunh/catkin_ws/src/hangdian_python/hangdian/colordata/color_0.png")
# # depth_bg = cv2.imread("/home/sunh/catkin_ws/src/hangdian_python/hangdian/depth_bg.png")
# depth_bg = np.zeros((1080, 1920),dtype=np.uint16)
#
# cv2.rectangle(depth_bg, (860, 198), (1514, 543), (1, 1, 1), -1)
# cv2.imshow('s',depth_bg)
# cv2.waitKey()
# depth_bg = cv2.split(depth_bg)[0]
#
# depth = cv2.imread("/home/sunh/catkin_ws/src/hangdian_python/hangdian/depth.png",-1)
# depth = cv2.split(depth)[0]
# depth[depth > 400] = 0
# depth = depth_bg*depth
# cv2.imwrite('/home/sunh/catkin_ws/src/hangdian_python/hangdian/depth_1.png',depth)
#
#

# cx = 323.844
# cy = 232.171
# fx = 615.3
# fy = 615.3
# color_raw = o3d.io.read_image("/media/sunh/Samsung_T51/yangzhengtao/realsense_test/rgb/1636292942.357203.png")
# depth_raw = o3d.io.read_image("/media/sunh/Samsung_T51/yangzhengtao/realsense_test/depth/1636292942.357203.png")
# rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(color_raw, depth_raw)
# width, height = 640, 480
# intrinsic = o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
# target = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic)
# mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size= 0.5, origin=[0, 0, 0])
# o3d.visualization.draw_geometries([target, mesh_frame])

# source = o3d.io.read_point_cloud("/home/sunh/6D_ws/init_pose/png2pcd/build/biaoding.pcd")
# target = o3d.io.read_point_cloud("/home/sunh/6D_ws/init_pose/png2pcd/build/pointcloud.pcd")
# o3d.visualization.draw_geometries([target, mesh_frame])

########################################################################################
########################################################################################
# import matplotlib.pyplot as plt
# import cv2 as cv
# from skimage import data
# img = cv.imread('/home/sunh/Desktop/boduan/15/0.png')
# hist0 = cv.calcHist([img], [0], None, [256], [0, 256])
# hist1 = cv.calcHist([img], [1], None, [256], [0, 256])
# hist2 = cv.calcHist([img], [2], None, [256], [0, 256])
# plt.figure(figsize=(6, 6))
# plt.plot(range(256), hist0, label='B')  # OpenCV loads images as BGR
# plt.plot(range(256), hist1, label='G')
# plt.plot(range(256), hist2, label='R')
# plt.legend()
# plt.title("BGR histogram")
# plt.show()

# os.system('rosrun dynamic_reconfigure dynparam set /camera/rgb_camera/ exposure 100')
# for i in range(0,6):
# 	filename = os.path.join('/home/sunh/Desktop/boduan/15/3/'+'{}.png'.format(i))
# 	print('filenamefilename',filename)
# 	img = cv2.imread(filename, 1)
# 	# img=cv.resize(img, (60, 60))
# 	# hist = cv.calcHist([img], [0], None, [256], [0.0, 255.0])
# 	img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# 	# b, g, r = cv2.split(img)
# 	h, s, v = cv2.split(img)
# 	#
# 	cv2.imwrite('/home/sunh/Desktop/boduan/15/3/{}_h.png'.format(i),h)
# 	cv2.imwrite('/home/sunh/Desktop/boduan/15/3/{}_s.png'.format(i),s)
# 	cv2.imwrite('/home/sunh/Desktop/boduan/15/3/{}_v.png'.format(i),v)
############################################################### HOG plot ###############################3
# import cv2
# import matplotlib.pyplot as plt
# from skimage.feature import hog
# from skimage import data, exposure
#
# img = []
# hog_all = []
# for i in range(3):
#     image = cv2.imread('/media/sunh/G/hangdian_bag/hangdain_ourpanel/switch/16/9/{}.png'.format(i))
#     image = cv2.resize(image, (128, 128))
#     image = image[:, :, ::-1]
#     fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
#                         cells_per_block=(1, 1), visualize=True, multichannel=True)
#     hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
#
#     img.append(image)
#     hog_all.append(hog_image_rescaled)
#
#
#
# fig, ax = plt.subplots(2, 3, figsize=(50, 50), sharex=True, sharey=True)
# ax[0][0].axis('on')
# ax[0][0].imshow(img[0], cmap=plt.cm.gray)
# ax[0][0].set_title('Input image')
# # Rescale histogram for better display
# ax[1][0].axis('on')
# ax[1][0].imshow(hog_all[0], cmap=plt.cm.gray)
# ax[1][0].set_title('HOG Feature')
# ##################################################3
# ax[0][1].axis('on')
# ax[0][1].imshow(img[1], cmap=plt.cm.gray)
# ax[0][1].set_title('Input image')
# # Rescale histogram for better display
# ax[1][1].axis('on')
# ax[1][1].imshow(hog_all[1], cmap=plt.cm.gray)
# ax[1][1].set_title('HOG Feature')
# ##################################################3
# ax[0][2].axis('on')
# ax[0][2].imshow(img[2], cmap=plt.cm.gray)
# ax[0][2].set_title('Input image')
# # Rescale histogram for better display
# ax[1][2].axis('on')
# ax[1][2].imshow(hog_all[2], cmap=plt.cm.gray)
# ax[1][2].set_title('HOG Feature')
##################################################3
# ax[0][3].axis('on')
# ax[0][3].imshow(img[3], cmap=plt.cm.gray)
# ax[0][3].set_title('Input image')
# # Rescale histogram for better display
# ax[1][3].axis('on')
# ax[1][3].imshow(hog_all[3], cmap=plt.cm.gray)
# ax[1][3].set_title('HOG Feature')
# plt.show()
############################################################### HOG plot ###############################3



###############################################################  test other point cloud rough match  ###############################3
# cx = 950.267630
# cy = 538.395644
# fx = 1380.215613
# fy = 1380.945531
# source = o3d.io.read_triangle_mesh('/home/sunh/catkin_ws/src/hangdian_python/hangdian/sunhan/model/16.ply')
#
# color_raw = o3d.io.read_image('/home/sunh/catkin_ws/src/hangdian_python/hangdian/colordata/color_0.png')
# depth_raw = o3d.io.read_image("/home/sunh/catkin_ws/src/hangdian_python/hangdian/depth_new.png")
# rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(color_raw, depth_raw)
# width, height = 1920, 1080
# intrinsic = o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
# target = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic)
# o3d.io.write_point_cloud('/home/sunh/catkin_ws/src/hangdian_python/ICIEA/16.pcd',target)

# source = o3d.io.read_point_cloud('/home/sunh/catkin_ws/src/hangdian_python/hangdian/sunhan/my_model/162.ply', format='xyzrgb')
# source1 = o3d.io.read_point_cloud('/home/sunh/catkin_ws/src/hangdian_python/ICIEA/1.pcd')
# source2 = o3d.io.read_point_cloud('/home/sunh/catkin_ws/src/hangdian_python/ICIEA/2.pcd')
# source3 = o3d.io.read_point_cloud('/home/sunh/catkin_ws/src/hangdian_python/ICIEA/3.pcd')
# source1.paint_uniform_color([1, 0., 0.])
# source2.paint_uniform_color([0, 0 , 1])
# source3.paint_uniform_color([0, 1, 0])


# source = source1 + source2 + source3
#
# o3d.io.write_point_cloud('/home/sunh/catkin_ws/src/hangdian_python/ICIEA/model.pcd',source)
# o3d.io.write_point_cloud('/home/sunh/catkin_ws/src/hangdian_python/ICIEA/model.ply',source)
# o3d.visualization.draw_geometries([source])

# cx = 950.267630
# cy = 538.395644
# fx = 1380.215613
# fy = 1380.945531
#
# for i in range(5):
#     exp = i+1
#     color_raw = o3d.io.read_image('/home/sunh/catkin_ws/src/hangdian_python/ICIEA/exp/{}/color_0.png'.format(exp))
#     depth_raw = o3d.io.read_image("/home/sunh/catkin_ws/src/hangdian_python/ICIEA/exp/{}/depth_new.png".format(exp))
#     rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(color_raw, depth_raw)
#     width, height = 1920, 1080
#     intrinsic = o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
#     source = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic)
#     o3d.io.write_point_cloud('/home/sunh/catkin_ws/src/hangdian_python/ICIEA/exp/{}.ply'.format(exp), source)

###############################################################  G  pc match ###############################3
#
# '''
# Author: dongcidaci
# Date: 2021-09-14 10:08:14
# LastEditTime: 2021-09-14 10:31:45
# LastEditors: Please set LastEditors
# Description: In User Settings Edit
# FilePath: \open3d_code\2_03_pointcloudpeizhun.py
# '''
# import open3d as o3d
# import numpy as np
# import copy
# import time
# from scipy.spatial.transform import Rotation as R
#
#
#
# # global point-cloud registration
# # visualization
# # this helper visualizes the registered source and target point clouds together
# def draw_registration_result(source, target, transformation):
#     source_temp = copy.deepcopy(source)
#     target_temp = copy.deepcopy(target)
#     source_temp.paint_uniform_color([1, 0.706, 0])
#     target_temp.paint_uniform_color([0, 0.651, 0.929])
#     source_temp.transform(transformation)
#     o3d.visualization.draw_geometries([source_temp, target_temp])
#
#
# # extract geometric features
# # downsample the point cloud, estimate normals, then compute an FPFH feature for each point. The FPFH feature is a
# # 33-dimensional vector describing the local geometric property of a point; a nearest-neighbor query in this
# # 33-dimensional space returns points with similar local geometry.
# def preprocess_point_cloud(pcd, voxel_size):
#     print(":: Downsample with a voxel size %.3f." % voxel_size)
#     pcd_down = pcd.voxel_down_sample(voxel_size)
#
#     radius_normal = voxel_size * 2
#     print(":: Estimate normal with search radius %.3f." % radius_normal)
#     pcd_down.estimate_normals(
#         o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
#
#     radius_feature = voxel_size * 5
#     print(":: Compute FPFH feature with search radius %.3f." % radius_feature)
#     pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
#         pcd_down,
#         o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
#     return pcd_down, pcd_fpfh
#
#
# # input
# # the following code reads a source and a target point cloud from two files; with the identity matrix as the
# # initial transformation, the pair starts out misaligned.
# def prepare_dataset(source, target,voxel_size):
#     print(":: Load two point clouds and disturb initial pose.")
#
#
#
#     # target.paint_uniform_color([0, 0 , 1])
#
#     # draw_registration_result(source, target, np.identity(4))
#
#     source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
#     target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
#     return source_down, target_down, source_fpfh, target_fpfh
#
#
# # RANSAC
# # global registration with RANSAC: in each iteration, ransac_n random points are picked from the source point cloud,
# # and their correspondences in the target point cloud are found by nearest-neighbor queries in the 33-dimensional
# # FPFH feature space. A pruning step rejects bad matches early. Open3D provides these pruning checkers:
# # CorrespondenceCheckerBasedOnDistance checks whether corresponding points are close (distance below a given threshold).
# # CorrespondenceCheckerBasedOnEdgeLength checks whether two arbitrary edges (lines between two vertices) drawn from the
# # source and target correspondences have similar length.
# # CorrespondenceCheckerBasedOnNormal considers the normal affinity of all correspondences: it computes the dot product
# # of the two normal vectors, with the threshold given in radians.
# # Only matches that pass the pruning step are used to compute a transformation, which is then validated on the whole
# # point cloud. The core function is registration_ransac_based_on_feature_matching, and RANSACConvergenceCriteria is its
# # most important hyperparameter: it sets the maximum numbers of RANSAC iterations and validations. The larger these
# # values, the more accurate the result, but the longer it takes.
# def execute_global_registration(source_down, target_down, source_fpfh,
#                                 target_fpfh, voxel_size):
#     distance_threshold = voxel_size * 1.5
#     print(":: RANSAC registration on downsampled point clouds.")
#     print("   Since the downsampling voxel size is %.3f," % voxel_size)
#     print("   we use a liberal distance threshold %.3f." % distance_threshold)
#     result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
#         source_down, target_down, source_fpfh, target_fpfh, True,
#         distance_threshold,
#         o3d.pipelines.registration.TransformationEstimationPointToPoint(False),
#         3, [
#             o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(
#                 0.9),
#             o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(
#                 distance_threshold)
#         ], o3d.pipelines.registration.RANSACConvergenceCriteria(100000, 0.999))
#     return result
#
#
# # fast global registration
# # we use the same input as the RANSAC baseline
# def execute_fast_global_registration(source_down, target_down, source_fpfh,
#                                      target_fpfh, voxel_size):
#     distance_threshold = voxel_size * 0.5
#     print(":: Apply fast global registration with distance threshold %.3f" \
#           % distance_threshold)
#     result = o3d.pipelines.registration.registration_fast_based_on_feature_matching(
#         source_down, target_down, source_fpfh, target_fpfh,
#         o3d.pipelines.registration.FastGlobalRegistrationOption(
#             maximum_correspondence_distance=distance_threshold))
#     return result
#
# # local refinement
# # for performance reasons, global registration only runs on a heavily downsampled point cloud, so its result is not
# # precise; a dense ICP pass on the original clouds refines the alignment (point-to-point estimation below; a
# # point-to-plane variant, which needs normals, is sketched after this function).
# def refine_registration(source, target, source_fpfh, target_fpfh, voxel_size):
#     distance_threshold = voxel_size * 0.4
#     print(":: Point-to-plane ICP registration is applied on original point")
#     print("   clouds to refine the alignment. This time we use a strict")
#     print("   distance threshold %.3f." % distance_threshold)
#     result = o3d.pipelines.registration.registration_icp(
#         source, target, distance_threshold, result_ransac.transformation,
#         o3d.pipelines.registration.TransformationEstimationPointToPoint())
#     return result
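#
# # a minimal point-to-plane sketch of the refinement step mentioned above; the helper name, search radii, and
# # max_nn values are illustrative assumptions, not part of the original scratch code:
# def refine_registration_p2l(source, target, voxel_size, init_transformation):
#     # point-to-plane ICP needs normals on the full-resolution clouds
#     source.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
#     target.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
#     return o3d.pipelines.registration.registration_icp(
#         source, target, voxel_size * 0.4, init_transformation,
#         o3d.pipelines.registration.TransformationEstimationPointToPlane())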
#
#
#
#
#
# voxel_size = 0.05  # means 5cm for this dataset
# for i in range(5):
#     start = time.time()
#     target = o3d.io.read_point_cloud("/home/sunh/catkin_ws/src/hangdian_python/ICIEA/exp/{}.ply".format(i+1))
#     source= o3d.io.read_point_cloud("/home/sunh/catkin_ws/src/hangdian_python/hangdian/sunhan/my_model/162 (copy).ply",
#                                      format='xyzrgb')
#     source_down, target_down, source_fpfh, target_fpfh = prepare_dataset(source, target, voxel_size)
#
#     result_ransac = execute_global_registration(source_down, target_down,
#                                                 source_fpfh, target_fpfh,
#                                                 voxel_size)
#     # print('result_ransac',result_ransac)
#     # draw_registration_result(source, target, result_ransac.transformation)
#
#
#     end = time.time()
#     print('time :', end - start)
#
#     voxel_radius = [0.014, 0.01, 0.005, 0.001]
#     max_iter = [50, 30, 14, 150]
#     current_transformation = result_ransac.transformation
#
#     print("3. Colored point cloud registration")
#     for scale in range(4):
#         iter = max_iter[scale]
#         radius = voxel_radius[scale]
#         print([iter, radius, scale])
#
#         print("3-1. Downsample with a voxel size %.2f" % radius)
#
#         source_down = source.voxel_down_sample(radius)
#         target_down = target.voxel_down_sample(radius)
#
#         print("3-2. Estimate normal.")
#         source_down.estimate_normals(
#             o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))
#         target_down.estimate_normals(
#             o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30))
#
#         print("3-3. Applying colored point cloud registration")
#
#         result_icp = o3d.pipelines.registration.registration_icp(
#             source_down, target_down, 0.02, current_transformation,
#             o3d.pipelines.registration.TransformationEstimationPointToPlane(),
#             o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=2000))
#
#         current_transformation = result_icp.transformation
#         print(result_icp)
#
#     # draw_registration_result(source, target, current_transformation)
#
#     print('time :', time.time() - start)
#
#
#     T = current_transformation
#     Rot = T[:3, :3]
#     q = R.from_matrix(Rot)
#     quaternion = q.as_quat()
#     print("calibration results: rosrun tf static_transform_publisher " + str( T[0, 3]) + ' ' + str(
#         T[1, 3]) + ' ' + str( T[2, 3]) + ' ' + str(quaternion[0]) + ' ' + str(quaternion[1]) + ' ' + str(
#         quaternion[2]) + ' ' + str(quaternion[3]) + " /camera_link /obj 50")


#
# # fast global registration
# # RANSAC-based global registration takes a long time because of the countless model proposals and evaluations.
# # Fast global registration speeds this up by optimizing line-process weights over a fixed set of candidate
# # correspondences: with no model proposal and evaluation in each iteration, it saves a large amount of compute time.
# voxel_size = 0.05  # means 5cm for the dataset
#
# start = time.time()
# source, target, source_down, target_down, source_fpfh, target_fpfh = \
#     prepare_dataset(voxel_size)  # note: Open3D tutorial signature; the prepare_dataset defined above takes (source, target, voxel_size) and returns 4 values
# # baseline
# # the code below times the global registration algorithm
#
# result_ransac = execute_global_registration(source_down, target_down,
#                                             source_fpfh, target_fpfh,
#                                             voxel_size)
#
# print("Global registration took %.3f sec.\n" % (time.time() - start))
# # draw_registration_result(source_down, target_down,
# #                          result_ransac.transformation)
# start = time.time()
#
# result_fast = execute_fast_global_registration(source_down, target_down,
#                                                source_fpfh, target_fpfh,
#                                                voxel_size)
#
# print("Fast global registration took %.3f sec.\n" % (time.time() - start))
# draw_registration_result(source, target,
#                          result_fast.transformation)




############################################  open3d remove outline point #######################################
def display_inlier_outlier(cloud, ind):
    inlier_cloud = cloud.select_by_index(ind)
    outlier_cloud = cloud.select_by_index(ind, invert=True)

    print("Showing outliers (red) and inliers (gray): ")
    outlier_cloud.paint_uniform_color([1, 0, 0])
    inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8])
    o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud], window_name='Open3D Removal Outlier', width=1920,
                                      height=1080, left=50, top=50, point_show_normal=False, mesh_show_wireframe=False,
                                      mesh_show_back_face=False)

target = o3d.io.read_point_cloud('/home/sunh/catkin_ws/src/hangdian_python/ICIEA/exp/1.ply')
print("Downsample the point cloud with a voxel of 0.003")
downpcd = target.voxel_down_sample(voxel_size=0.003)
print(downpcd)
o3d.visualization.draw_geometries([downpcd], window_name='Open3D downSample', width=1920, height=1080, left=50, top=50,
                                  point_show_normal=False, mesh_show_wireframe=False, mesh_show_back_face=False)

# Recompute the surface normals
# Vertex normal estimation: another basic point-cloud operation. Press N in the viewer to show the normals;
# the - and + keys control their displayed length.
print("Recompute the normal of the downsampled point cloud")
downpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
o3d.visualization.draw_geometries([downpcd], window_name='Open3D downSample Normals', width=1920, height=1080, left=50,
                                  top=50, point_show_normal=True, mesh_show_wireframe=False, mesh_show_back_face=False)

# Outlier removal
# Point clouds collected from scanning devices often contain noise and artifacts that should be removed. This part
# uses Open3D's outlier-removal functions on the downsampled cloud prepared above.
# remove_statistical_outlier (statistical outlier removal) deletes points that are farther from their neighbors
# than the cloud average. It takes two parameters:
#     nb_neighbors: how many neighbors to consider when computing the mean distance of a given point
#     std_ratio: threshold based on the standard deviation of the mean distances across the cloud; the lower the
#                number, the more aggressive the filter
# remove_radius_outlier (radius outlier removal) deletes points that have few neighbors within a given sphere
# (a minimal sketch follows the statistical example below). Its two tunable parameters:
#     nb_points: the minimum number of points the sphere must contain
#     radius: the radius of the sphere used to count neighbors
print("Statistical oulier removal")
cl, ind = downpcd.remove_statistical_outlier(nb_neighbors=15, std_ratio=2.0)

display_inlier_outlier(cl, ind)
downpcd_inlier_cloud = downpcd.select_by_index(ind)
print(downpcd_inlier_cloud)
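
For the radius-based variant described above, a minimal sketch on the same downsampled cloud; the nb_points=16 and radius=0.01 values are illustrative assumptions, not tuned for this data:

print("Radius outlier removal")
# drop points that have fewer than 16 neighbors within a 1 cm sphere
cl_r, ind_r = downpcd.remove_radius_outlier(nb_points=16, radius=0.01)
display_inlier_outlier(downpcd, ind_r)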

############################################  opencv show point  #####################################

# import cv2
#
# image = cv2.imread('/home/sunh/catkin_ws/src/hangdian_python/hangdian/colordata/color_0.png')
#
# h,w,_ = image.shape
#
# cv2.circle(image,(int(w/2), int(h/2) +60 ), 5, (0,0,255),2 )
# cv2.circle(image,(int(w/2) + 150, int(h/2) +60 ), 5, (0,0,255),2 )
# cv2.circle(image,(int(w/2), int(h/2) +60  +150), 5, (0,0,255),2 )
#
# cv2.imshow('s',image)
# cv2.waitKey(0)

Summary

The snippets above are the scratch utilities I keep reusing: RealSense capture and intrinsics export, Open3D point-cloud generation and registration (RANSAC plus ICP refinement), outlier removal, and assorted OpenCV helpers for line detection, histograms, and HOG visualization.
