# 图像拼接
# 1、读文件并且重置尺寸
# 2、根据特征点和计算描述子,得到单应性矩阵
# 3、图像变换
# 4、图像拼接并且输出图像
import cv2
import numpy as np
# 进行图像拼接
def sticth(img1, img2, H):
    """Stitch img2 onto an H-warped img1 and return the combined canvas.

    ``H`` is the 3x3 homography mapping img1 coordinates into img2's
    coordinate frame (as produced by ``get_homo``).  img1 is warped into
    img2's frame, a canvas large enough for both is allocated, and img2
    is pasted on top unwarped.

    NOTE(review): the function name keeps the original typo ("sticth")
    so existing callers keep working.

    Args:
        img1: BGR image (H x W x 3 ndarray) to be warped by ``H``.
        img2: BGR reference image pasted unwarped onto the canvas.
        H:    3x3 homography mapping img1 coords -> img2 coords.

    Returns:
        The stitched BGR image as a numpy ndarray.
    """
    # Heights/widths of the two source images.
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # Corner points of each image, shaped (-1, 1, 2) as required by
    # cv2.perspectiveTransform.
    img1_dims = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
    img2_dims = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)
    # Bug fix: the canvas must cover img2 (pasted as-is) plus img1 *after*
    # warping by H.  The original transformed img2's corners with H instead,
    # which only happened to work because both inputs were resized to the
    # same dimensions (the corner arrays were numerically identical).
    img1_transform = cv2.perspectiveTransform(img1_dims, H)
    result_dims = np.concatenate((img2_dims, img1_transform), axis=0)
    # Bug fix: use floor/ceil instead of the +-0.5 trick with int32(),
    # which truncates toward zero and mis-rounds negative coordinates
    # by one pixel.
    [x_min, y_min] = np.int32(np.floor(result_dims.min(axis=0).ravel()))
    [x_max, y_max] = np.int32(np.ceil(result_dims.max(axis=0).ravel()))
    # Translation that shifts all content into non-negative coordinates.
    transform_dist = [-x_min, -y_min]
    # Homogeneous translation matrix:
    # [1, 0, dx]
    # [0, 1, dy]
    # [0, 0, 1 ]
    transform_array = np.array([[1, 0, transform_dist[0]],
                                [0, 1, transform_dist[1]],
                                [0, 0, 1]])
    # Warp img1 into the translated frame of img2.
    result_img = cv2.warpPerspective(img1, transform_array.dot(H),
                                     (x_max - x_min, y_max - y_min))
    # Paste img2 (unwarped) on top of the warped img1.
    result_img[transform_dist[1]:transform_dist[1] + h2,
               transform_dist[0]:transform_dist[0] + w2] = img2
    return result_img
# 得到单应性矩阵
def get_homo(img1, img2):
    """Compute the homography mapping img1 coordinates to img2 coordinates.

    Pipeline: SIFT keypoints/descriptors for both images, brute-force
    k-NN matching (k=2), Lowe's ratio test to keep good matches, then
    ``cv2.findHomography`` with RANSAC (which rejects remaining outliers).

    Args:
        img1: first BGR (or grayscale) image.
        img2: second BGR (or grayscale) image.

    Returns:
        The 3x3 homography matrix H such that img2 points ~= H * img1 points.

    Raises:
        ValueError: if fewer than 4 good matches survive the ratio test
            (4 point pairs are the minimum needed to solve for H).
    """
    # Detect keypoints and compute descriptors for both images.
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher; knnMatch returns the 2 nearest descriptors
    # per query so the ratio test below can compare them.
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Lowe's ratio test: keep a match only when the best candidate is
    # clearly better than the runner-up.
    good = [m for m, n in matches if m.distance < 0.7 * n.distance]
    # Error-handling fix: the original print()+exit() terminated the whole
    # process with status 0 on failure; raise so callers can handle it.
    if len(good) < 4:
        raise ValueError(
            f"not enough good matches to compute a homography "
            f"(need at least 4, got {len(good)})")
    # Matched point coordinates in each image, shaped for findHomography.
    img1pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    img2pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # RANSAC rejects mismatched pairs; 5.0 is the reprojection threshold
    # in pixels.  mask flags which matches were treated as inliers.
    H, mask = cv2.findHomography(img1pts, img2pts, cv2.RANSAC, 5.0)
    return H
# 读取两张图片
# Script entry point: load the two images, compute the homography,
# stitch them, and display the result.
if __name__ == "__main__":
    # Read the two input images.
    img1 = cv2.imread('../img/map1.jpg')
    img2 = cv2.imread('../img/map2.jpg')
    # Robustness fix: cv2.imread returns None (no exception) when a file
    # is missing or unreadable -- fail early with a clear message instead
    # of crashing later inside resize().
    if img1 is None or img2 is None:
        raise FileNotFoundError(
            "could not read '../img/map1.jpg' and/or '../img/map2.jpg'")
    # Resize both inputs to a fixed size.
    img1 = cv2.resize(img1, (640, 480))
    img2 = cv2.resize(img2, (640, 480))
    # (Removed dead code: an np.hstack of the two inputs was computed but
    # never displayed or used.)
    # Homography mapping img1 coordinates into img2's frame.
    H = get_homo(img1, img2)
    # Stitch the images together and show the result.
    result_img = sticth(img1, img2, H)
    cv2.imshow('img', result_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()  # release the display window on exit