Point Cloud Reconstruction with the D435i

Task: use an Intel RealSense D435i to capture depth and RGB images, then perform point cloud reconstruction and clustering.

1) Parsing the bag data:

import os
import copy
import time

import cv2
import numpy as np
import rosbag
from cv_bridge import CvBridge
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2

import pcl
from pcl import pcl_visualization
import pyrealsense2 as rs

from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from skimage.segmentation import chan_vese

if __name__ == "__main__":
    visual = pcl_visualization.CloudViewing()
    pc = rs.pointcloud()
    points = rs.points()

    bag = rosbag.Bag(file_path, "r")
    bag_data = bag.read_messages()
    info = bag.get_type_and_topic_info()
    print(info)

    bridge = CvBridge()
    depth = None
    depth_10 = None
    cv_img10 = None

    # output video: two 1280x720 frames side by side plus a 3-pixel gap
    cap = cv2.VideoWriter(video_output, cv2.VideoWriter_fourcc(*'MP4V'), 20.0, (1280 * 2 + 3, 720))
    nn = 0
    for topic, msg, t in bag_data:
        print(topic)
        if topic == "/cam_1/aligned_depth_to_color/image_raw/compressed":
            depth = bridge.compressed_imgmsg_to_cv2(msg)
            show_img(depth, "Depth Image")
        elif topic == "/cam_1/color/image_raw/compressed":
            cv_img = bridge.compressed_imgmsg_to_cv2(msg)
            show_img(cv_img, "Cam1")
        elif topic == "/cam_2/aligned_depth_to_color/image_raw/compressed":
            depth_10 = bridge.compressed_imgmsg_to_cv2(msg)
        elif topic == "/cam_2/color/image_raw/compressed":
            cv_img10 = bridge.compressed_imgmsg_to_cv2(msg)
            show_img(cv_img10, "Cam2")
            if depth_10 is not None:
                seg_color, add_image = visual_pcl(visual, depth_10, cv_img10)
                save_seg_video(cap, cv_img10, add_image)
            # if nn == 2499:    # save the data and results of one specific frame
            #     seg, seg_color, add_image = visual_pcl(visual, depth_10, cv_img10)
            #     cv2.imwrite("./save_data/test_rgb.png", cv_img10)
            #     cv2.imwrite("./save_data/test_depth.png", depth_10)
            #     xyz_sand = depth2xyz(depth_10, depth_camera_metrix)
            #     xyz_sand = xyz_sand.reshape(-1, 3)
            #     with open("./save_data/points.txt", "w") as f:
            #         for i in range(len(xyz_sand)):
            #             strings = str(xyz_sand[i][0]) + "," + str(xyz_sand[i][1]) + "," + str(xyz_sand[i][2]) + "\n"
            #             f.write(strings)
        elif topic == "/cam_3/color/image_raw/compressed":
            cv_img = bridge.compressed_imgmsg_to_cv2(msg)
            show_img(cv_img, "Cam3")
        nn = nn + 1
        print(nn)

    cap.release()
    print("end")

2) Image segmentation:

# Display an image when SHOW_IMAGE is enabled
def show_img(img, window_name):
    if SHOW_IMAGE:
        cv2.imshow(window_name, img)
        cv2.waitKey(3)

# Keep only the largest connected region of a binary mask
def find_max_region(mask_sel):
    contours, hierarchy = cv2.findContours(mask_sel, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    if len(contours) == 0:
        return mask_sel

    # find the largest contour by area
    area = []
    for j in range(len(contours)):
        area.append(cv2.contourArea(contours[j]))
    max_idx = np.argmax(area)

    # erase every contour except the largest one
    for k in range(len(contours)):
        if k != max_idx:
            cv2.fillPoly(mask_sel, [contours[k]], 0)
        # elif k == max_idx:
        #     cv2.fillPoly(mask_sel, [contours[k]], 255)
    return mask_sel


# Fill interior holes in a binary mask via flood fill
def FillHole(img):

    # work on a copy of the input image
    im_floodfill = img.copy()

    # mask used by floodFill; OpenCV requires it to be 2 pixels larger in each dimension
    h, w = img.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)

    # the seed point passed to floodFill must lie in the background (value 0);
    # note that floodFill expects the seed as (x, y), i.e. (column, row)
    isbreak = False
    seedPoint = None
    for i in range(im_floodfill.shape[0] - 2):
        for j in range(im_floodfill.shape[1] - 2):
            if im_floodfill[i][j] == 0:
                seedPoint = (j, i)
                isbreak = True
                break
        if isbreak:
            break

    try:
        if seedPoint is not None:
            # flood-fill the background from the seed point
            cv2.floodFill(im_floodfill, mask, seedPoint, 255)

            # invert the flood-filled image
            im_floodfill_inv = cv2.bitwise_not(im_floodfill)
            # combine the original mask with the inverted flood fill to obtain the filled foreground
            im_out = img | im_floodfill_inv
            return im_out
        else:
            return img
    except cv2.error:
        return img



def segmentation_rgb(img):
    origin_img = img
    show_img(origin_img, "Origin R image")

    # smooth, then Otsu-threshold the red channel
    img = cv2.GaussianBlur(img, (5, 5), 1)
    h, w, c = img.shape
    ret3, segr = cv2.threshold(img[:, :, 2], 0, 255, cv2.THRESH_OTSU)

    show_img(segr, "Origin Seg R")

    # clean up the mask: opening, dilation, then hole filling
    kernel = np.ones((5, 5), np.uint8)
    segr = cv2.morphologyEx(segr, cv2.MORPH_OPEN, kernel, iterations=2)
    show_img(segr, "Seg R Open Operation")

    segr = cv2.dilate(segr, kernel, iterations=3)
    show_img(segr, "Seg R Dilate Operation")

    segr = FillHole(segr)
    show_img(segr, "Seg R Fill Hole")

    # suppress the first segment, then run Otsu again on the remaining pixels
    img[:, :, 2][segr > 0] = 0
    ret3, segrnew = cv2.threshold(img[:, :, 2], 0, 255, cv2.THRESH_OTSU)
    segrnew[segr == 255] = 0

    show_img(segrnew, "Re-seg R SEG")

    show_img(img[:, :, 0], "B")
    show_img(img[:, :, 2], "R")
    show_img(img[:, :, 1], "G")

    # keep the largest region and fill holes in the lower part of the mask
    segr = find_max_region(segrnew)
    mm = segr[190:, :]
    tp = FillHole(mm)
    segr[190:, :] = tp
    show_img(segr, "Re R SEG - find max region and fill hole")
    add_image = show_mask_on_image(origin_img, segr)

    # split the module-level depth image into car and sand parts using the mask
    dp_car = copy.copy(depth)
    dp_sand = copy.copy(depth)
    dp_car[segr == 255] = 0
    dp_sand[segr == 0] = 0
    show_img(dp_car, "Depth - Car")
    show_img(dp_sand, "Depth - Sand")

    return segr, add_image
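
The helper show_mask_on_image, used above to build the overlay frame for the output video, is not included in the post. A minimal sketch of such a helper is shown below; the blending weights and the green mask color are assumptions.

def show_mask_on_image(image, mask):
    # Assumed helper: blend the binary mask (drawn in green) over the RGB image
    # so the segmentation can be inspected next to the original frame.
    overlay = image.copy()
    overlay[mask == 255] = (0, 255, 0)
    add_image = cv2.addWeighted(image, 0.6, overlay, 0.4, 0)
    show_img(add_image, "Mask on Image")
    return add_image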

3) Computing a point cloud from the depth map:

# Back-project a depth map into a point cloud using the pinhole camera model
def depth2xyz(depth_map, depth_cam_matrix, flatten=False, depth_scale=1000):
    fx, fy = depth_cam_matrix[0, 0], depth_cam_matrix[1, 1]
    cx, cy = depth_cam_matrix[0, 2], depth_cam_matrix[1, 2]
    h, w = np.mgrid[0:depth_map.shape[0], 0:depth_map.shape[1]]
    z = depth_map / depth_scale                 # raw depth (mm) -> meters
    x = (w - cx) * z / fx
    y = (h - cy) * z / fy
    xyz = np.dstack((x, y, z)) if not flatten else np.dstack((x, y, z)).reshape(-1, 3)
    return xyz
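
As a quick sanity check, the function can be run on a single saved depth frame. The file name follows the commented-out save path in step 1, and the intrinsics matrix is the placeholder one defined earlier, so treat this purely as a usage sketch.

# Minimal usage sketch: back-project one saved depth frame.
depth_frame = cv2.imread("./save_data/test_depth.png", cv2.IMREAD_UNCHANGED)
xyz = depth2xyz(depth_frame, depth_camera_metrix, flatten=True)   # (H*W, 3) in meters
print(xyz.shape, xyz[:, 2].max())   # number of points and the farthest depth value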

4) Point cloud reconstruction and visualization:

def visual_pcl(visual, depth, color):
    t1 = time.time()
    seg_color, add_image = segmentation_rgb(color)
    print("Segmentation time: ", time.time() - t1, "s")

    if GET_POINT:
        # lift the background to a dark gray so it stays visible in the viewer
        seg_color[seg_color == 0] = 20
        seg_color = cv2.cvtColor(seg_color, cv2.COLOR_GRAY2BGR)

        depth_sand = depth

        t2 = time.time()
        xyz_sand = depth2xyz(depth_sand, depth_camera_metrix)
        print("Depth to point cloud time: ", time.time() - t2, "s")
        xyz_sand = xyz_sand.reshape(-1, 3)

        seg_color = seg_color.reshape(-1, 3)
        color = color.reshape(-1, 3)
        # seg_color[seg_color > 0] = 255    # pre-clustering result; just change the color value

        # kmeans(xyz_sand, xyz_vehicle)     # cluster the points with k-means

        # pack XYZ (converted back to millimeters) and a 24-bit RGB value into a PCL cloud
        cloud = pcl.PointCloud_PointXYZRGB()
        temp = np.zeros((len(xyz_sand), 4), np.float32)
        length = len(xyz_sand)
        for i in range(length):
            temp[i][0] = xyz_sand[i][0] * 1000.0
            temp[i][1] = xyz_sand[i][1] * 1000.0
            temp[i][2] = xyz_sand[i][2] * 1000.0
            # temp[i][3] = 65536.0 * color[i][2] + 256.0 * color[i][1] + color[i][0]   # render with the original image colors
            temp[i][3] = 65536.0 * seg_color[i][2] + 256.0 * seg_color[i][1] + seg_color[i][0]

        cloud.from_array(temp)
        visual.ShowColorCloud(cloud, b"color")

    return seg_color, add_image
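
The commented-out kmeans(xyz_sand, xyz_vehicle) call refers to a clustering routine that is not shown in the post. Below is a minimal sketch of clustering the reconstructed points with scikit-learn's KMeans (already imported above); the number of clusters, the zero-depth filtering, and the function name kmeans_cluster are assumptions, not the original implementation.

def kmeans_cluster(xyz, n_clusters=2):
    # Assumed sketch: drop invalid (zero-depth) points, normalize, and run k-means.
    valid = xyz[xyz[:, 2] > 0]
    scaled = StandardScaler().fit_transform(valid)
    labels = KMeans(n_clusters=n_clusters, n_init=10).fit_predict(scaled)
    return valid, labels

# Example: color the clusters in a 3D scatter plot.
# points, labels = kmeans_cluster(xyz_sand)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection="3d")
# ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=labels, s=1)
# plt.show()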

Results: