Python Stereo Imaging with OpenCV: Binocular Stereo Vision (complete Python code)

0. Basic Steps of a Binocular Stereo Vision Pipeline

a) Stereo calibration (samples/cpp/stereo_calib.cpp), done in a single procedure.

b) Epipolar rectification of the images using the calibration results (the stereoRectify function).

c) Finding corresponding points (disparity) along each epipolar line (many choices exist; see StereoMatcher).

d) Converting the disparity into a point cloud (cv2.reprojectImageTo3D).

e) Point cloud storage (write_ply in samples/python/stereo_match.py) and display.

1. Stereo Chessboard Calibration in Detail

1.1 The calibration function in the C++ sample:

StereoCalib(imagelist, boardSize, squareSize, false, true, showRectified);

It needs:

a series of images;

the number of inner corners of the board, boardSize (e.g. 8x6);

the size of a board square, squareSize (e.g. 20 mm);

displayCorners: whether to display the detected corners;

useCalibrated: whether to use the calibrated result for rectification;

showRectified: whether to show the rectified images.

1.2 Calibration workflow

1. Find the sub-pixel corners, imagePoints[0] and imagePoints[1], for the left and right images respectively:

findChessboardCorners
cornerSubPix

2. Build the object point coordinates of the board, objectPoints:

objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0));

3. Get an initial CameraMatrix for each of the two cameras:

Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = initCameraMatrix2D(objectPoints, imagePoints[0], imageSize, 0);
cameraMatrix[1] = initCameraMatrix2D(objectPoints, imagePoints[1], imageSize, 0);

4. Run the stereo calibration:

Mat R, T, E, F;
double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                             cameraMatrix[0], distCoeffs[0],
                             cameraMatrix[1], distCoeffs[1],
                             imageSize, R, T, E, F,
                             CALIB_FIX_ASPECT_RATIO +
                             CALIB_ZERO_TANGENT_DIST +
                             CALIB_USE_INTRINSIC_GUESS +
                             CALIB_SAME_FOCAL_LENGTH +
                             CALIB_RATIONAL_MODEL +
                             CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
                             TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5));

5. Measuring the calibration accuracy; for this part, the comments in the sample say it all:

// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
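A rough Python counterpart of this check could look like the following. This is a minimal sketch, not the sample's exact code; it assumes the image_points list, image_number, and the cameraMatrix1/distCoeffs1, cameraMatrix2/distCoeffs2 and F outputs from the Python calibration code in section 1.3 below.

import cv2
import numpy as np

def mean_epipolar_error(pts_left, pts_right, K1, D1, K2, D2, F):
    # average distance of each corner to the epipolar line of its counterpart
    total_err, total_pts = 0.0, 0
    for pl, pr in zip(pts_left, pts_right):
        # undistort to pixel coordinates (P=K keeps pixel units)
        pl_u = cv2.undistortPoints(pl.reshape(-1, 1, 2), K1, D1, P=K1).reshape(-1, 2)
        pr_u = cv2.undistortPoints(pr.reshape(-1, 1, 2), K2, D2, P=K2).reshape(-1, 2)
        # epipolar lines of left points in the right image, and vice versa
        lines_r = cv2.computeCorrespondEpilines(pl_u.reshape(-1, 1, 2), 1, F).reshape(-1, 3)
        lines_l = cv2.computeCorrespondEpilines(pr_u.reshape(-1, 1, 2), 2, F).reshape(-1, 3)
        ones = np.ones((len(pl_u), 1), np.float32)
        # point-to-line distance |a*x + b*y + c| (a, b are already normalized by OpenCV)
        err_r = np.abs(np.sum(np.hstack([pr_u, ones]) * lines_r, axis=1))
        err_l = np.abs(np.sum(np.hstack([pl_u, ones]) * lines_l, axis=1))
        total_err += err_r.sum() + err_l.sum()
        total_pts += 2 * len(pl_u)
    return total_err / total_pts

print("avg epipolar err:", mean_epipolar_error(image_points[:image_number], image_points[image_number:],
                                               cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, F))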

6. Save the calibration results.

(omitted; a possible Python version is sketched below)
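One way to fill in this step is to write YAML files with cv2.FileStorage, roughly in the spirit of the C++ sample. This is only a sketch; the variable names cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F are the ones returned by cv2.stereoCalibrate in the code in 1.3 below, and the file names are just placeholders.

import cv2

# store intrinsics and extrinsics in YAML so rectification can be rebuilt without re-calibrating
fs = cv2.FileStorage("intrinsics.yml", cv2.FILE_STORAGE_WRITE)
fs.write("M1", cameraMatrix1); fs.write("D1", distCoeffs1)
fs.write("M2", cameraMatrix2); fs.write("D2", distCoeffs2)
fs.release()

fs = cv2.FileStorage("extrinsics.yml", cv2.FILE_STORAGE_WRITE)
fs.write("R", R); fs.write("T", T); fs.write("E", E); fs.write("F", F)
fs.release()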

7. Rectify one image pair to see whether it works:

Mat R1, R2, P1, P2, Q;
Rect validRoi[2];
stereoRectify(cameraMatrix[0], distCoeffs[0],
              cameraMatrix[1], distCoeffs[1],
              imageSize, R, T, R1, R2, P1, P2, Q,
              CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
// Precompute maps for cv::remap() (build the remap tables)
initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
// read an image and rectify it
Mat img = imread(goodImageList[i*2+k], 0); // loaded as grayscale: only intensity is needed here, color is added back below just for drawing
Mat rimg, cimg;
remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
cvtColor(rimg, cimg, COLOR_GRAY2BGR);

1.3 Python implementation

import os
import time

import cv2
import numpy as np

# 0. Basic configuration
show_corners = False
image_number = 13
board_size = (9, 6)      # boardSize: inner corners of the chessboard
square_Size = 20         # squareSize
image_lists = []         # the loaded images
image_points = []        # the detected corner points

# 1. Read the images and find the chessboard corners
image_dir = "/home/wukong/opencv-4.1.0/samples/data"
image_names = []
[image_names.append(image_dir + "/left%02d.jpg" % i) for i in
 [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14]]    # note: there is no image 10 in the sample data
[image_names.append(image_dir + "/right%02d.jpg" % i) for i in
 [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14]]
print(len(image_names))
for image_name in image_names:
    print(image_name)
    image = cv2.imread(image_name, 0)
    found, corners = cv2.findChessboardCorners(image, board_size)   # coarse corner detection
    if not found:
        print("ERROR (no corners): " + image_name)
        raise RuntimeError("corner detection failed on " + image_name)
    # show the result
    if show_corners:
        vis = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        cv2.drawChessboardCorners(vis, board_size, corners, found)
        cv2.imwrite(image_name.split(os.sep)[-1], vis)
        cv2.namedWindow("corners", cv2.WINDOW_NORMAL)
        cv2.imshow("corners", vis)
        cv2.waitKey()
    term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.01)
    cv2.cornerSubPix(image, corners, (11, 11), (-1, -1), term)      # refine the corners to sub-pixel accuracy
    image_points.append(corners.reshape(-1, 2))
    image_lists.append(image)

# 2. Build the object point coordinates of the board (objectPoints)
object_points = np.zeros((np.prod(board_size), 3), np.float32)
object_points[:, :2] = np.indices(board_size).T.reshape(-1, 2)
object_points *= square_Size
object_points = [object_points] * image_number

# 3. Get an initial CameraMatrix for each camera
h, w = image_lists[0].shape
camera_matrix = list()
camera_matrix.append(cv2.initCameraMatrix2D(object_points, image_points[:image_number], (w, h), 0))
camera_matrix.append(cv2.initCameraMatrix2D(object_points, image_points[image_number:], (w, h), 0))

# 4. Run the stereo calibration
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 100, 1e-5)
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
    cv2.stereoCalibrate(object_points, image_points[:image_number], image_points[image_number:],
                        camera_matrix[0], None, camera_matrix[1], None, (w, h),
                        flags=cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST |
                              cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_SAME_FOCAL_LENGTH |
                              cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5,
                        criteria=term)

# 5. Measure the calibration accuracy, TODO
# 6. Save the calibration results, TODO
# 7. Rectify one image pair to check that the epipolar rectification works
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \
    cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (w, h), R, T)
map1_1, map1_2 = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, (w, h), cv2.CV_16SC2)
map2_1, map2_2 = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, (w, h), cv2.CV_16SC2)
start_time = time.time()
result1 = cv2.remap(image_lists[0], map1_1, map1_2, cv2.INTER_LINEAR)
result2 = cv2.remap(image_lists[image_number], map2_1, map2_2, cv2.INTER_LINEAR)
print("remap time: %f (s)" % (time.time() - start_time))
result = np.concatenate((result1, result2), axis=1)
result[::20, :] = 0      # draw a horizontal line every 20 rows to check the epipolar alignment
cv2.imwrite("rec.png", result)

[Figure: epipolar rectification result (rec.png)]

The overall result looks reasonable.

2. Epipolar Rectification of the Images from the Calibration Results (stereoRectify)

Based on the calibration results, place new virtual cameras:

determine the new virtual camera poses so that the epipolar lines become parallel;

build the remap tables (map);

apply the mapping with remap.

R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \
    cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (w, h), R, T)
map1_1, map1_2 = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, (w, h), cv2.CV_16SC2)
map2_1, map2_2 = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, (w, h), cv2.CV_16SC2)
result1 = cv2.remap(image_lists[0], map1_1, map1_2, cv2.INTER_LINEAR)
result2 = cv2.remap(image_lists[image_number], map2_1, map2_2, cv2.INTER_LINEAR)

3. Find Corresponding Points Along Each Epipolar Line (Disparity)

There are many methods to choose from; a short usage sketch follows the list below.

[Figure: overview of stereo matching methods]

StereoBM: block matching, pixel-level disparities, fast.

StereoSGBM: semi-global block matching, sub-pixel accuracy, but much slower, so it is not really an option for real-time use.

StereoBeliefPropagation: reportedly treats the problem as a Markov random field, so it can be solved with belief propagation; I have not mastered this yet, and it is something to study later. # TODO
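As an illustration of the first two, here is a minimal, untuned sketch. It assumes the rectified grayscale pair result1 / result2 from the code above; the parameter values are just common starting points, not recommendations from the original article.

import cv2
import numpy as np

# StereoBM: fast block matching; expects 8-bit grayscale, returns disparities scaled by 16
bm = cv2.StereoBM_create(numDisparities=64, blockSize=15)
disp_bm = bm.compute(result1, result2).astype(np.float32) / 16.0

# StereoSGBM: semi-global block matching with sub-pixel disparities, noticeably slower
sgbm = cv2.StereoSGBM_create(minDisparity=0,
                             numDisparities=64,          # must be divisible by 16
                             blockSize=5,
                             P1=8 * 1 * 5 ** 2,          # smoothness penalties for a 5x5 block, 1 channel
                             P2=32 * 1 * 5 ** 2,
                             uniquenessRatio=10,
                             speckleWindowSize=100,
                             speckleRange=32)
disparity = sgbm.compute(result1, result2).astype(np.float32) / 16.0

# quick visualization of the disparity map
disp_vis = cv2.normalize(disparity, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imwrite("disparity.png", disp_vis)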

4. Convert the Disparity Map to a Point Cloud (cv2.reprojectImageTo3D)

This takes just one call, which is very simple; a slightly fuller sketch follows below.

points = cv2.reprojectImageTo3D(disparity, Q)
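In practice it also helps to mask out pixels with no valid disparity and keep the matching colors for later export. A small sketch, assuming the disparity, Q and result1 variables from the previous steps:

import cv2
import numpy as np

points = cv2.reprojectImageTo3D(disparity, Q)        # HxWx3 array of 3D coordinates
colors = cv2.cvtColor(result1, cv2.COLOR_GRAY2BGR)   # colors taken from the rectified left image

# keep only pixels where a disparity was actually found
mask = disparity > disparity.min()
out_points = points[mask]
out_colors = colors[mask]
print(out_points.shape, out_colors.shape)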

5. Storing and Displaying the Point Cloud

Omitted here; all of this can be found in opencv/samples/python (e.g. stereo_match.py). A minimal PLY export is sketched below.
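For completeness, here is a small PLY export in the spirit of the write_ply helper in samples/python/stereo_match.py. It is only a sketch: out_points and out_colors are assumed from the sketch in section 4, and the resulting out.ply can be opened in a viewer such as MeshLab. For a true color image, convert BGR to RGB before writing.

import numpy as np

ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''

def write_ply(filename, verts, colors):
    # flatten to N x 6 rows of x, y, z, r, g, b and write as ASCII PLY
    verts = verts.reshape(-1, 3)
    colors = colors.reshape(-1, 3)
    data = np.hstack([verts, colors])
    with open(filename, "wb") as f:
        f.write((ply_header % dict(vert_num=len(verts))).encode("utf-8"))
        np.savetxt(f, data, fmt="%f %f %f %d %d %d")

write_ply("out.ply", out_points, out_colors)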

