# Image stitching.

import cv2
import numpy as np


def read_image(image_path):
    """Load an image as 8-bit grayscale.

    DICOM files (``.dcm``) are read via pydicom and min-max normalized
    into the 0-255 range; any other extension is read with OpenCV
    directly in grayscale mode.

    Raises:
        FileNotFoundError: if OpenCV cannot read the file.
    """
    if image_path.lower().endswith(".dcm"):
        # Local import keeps pydicom optional for non-DICOM inputs.
        import pydicom
        dicom = pydicom.dcmread(image_path)
        image = dicom.pixel_array
        # Scale arbitrary pixel ranges (e.g. 12/16-bit data) to 0-255.
        image = cv2.normalize(image, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        image = np.uint8(image)
    else:
        image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        if image is None:
            # cv2.imread silently returns None for missing/unreadable
            # files; fail loudly instead of crashing later in SIFT.
            raise FileNotFoundError(f"Could not read image: {image_path}")

    return image


def detect_and_compute(image):
    """Run SIFT on *image* and return its (keypoints, descriptors) pair."""
    detector = cv2.SIFT_create()
    return detector.detectAndCompute(image, None)


def match_keypoints(descriptors1, descriptors2):
    """Match two SIFT descriptor sets with Lowe's ratio test (0.75).

    Returns the list of first-neighbor DMatch objects that pass the test.
    """
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(descriptors1, descriptors2, k=2)
    good_matches = []
    for pair in matches:
        # knnMatch can return fewer than 2 neighbors for some queries
        # (tiny descriptor sets); unpacking such pairs directly would
        # raise a ValueError.
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.75 * n.distance:
            good_matches.append(m)
    return good_matches


def get_homography(keypoints1, keypoints2, matches):
    """Estimate the RANSAC homography mapping image-1 points onto image-2."""
    src = np.float32([keypoints1[m.queryIdx].pt for m in matches]).reshape(-1, 2)
    dst = np.float32([keypoints2[m.trainIdx].pt for m in matches]).reshape(-1, 2)
    homography, _inlier_mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    return homography


def warp_images(image1, image2, H):
    """Warp image2 by homography H onto a canvas that also holds image1.

    The canvas is sized to the union bounding box of image1's corners and
    image2's transformed corners, then everything is translated so all
    coordinates are non-negative; image1 is pasted (untransformed) on top.

    NOTE(review): assumes both images are single-channel 2-D arrays
    (`.shape` unpacks into exactly two values) -- confirm with callers.
    """
    height1, width1 = image1.shape
    height2, width2 = image2.shape
    # Corner coordinates of each image, shaped (4, 1, 2) as required by
    # cv2.perspectiveTransform.
    points1 = np.array([[0, 0], [0, height1], [width1, height1], [width1, 0]], dtype=np.float32).reshape(-1, 1, 2)
    points2 = np.array([[0, 0], [0, height2], [width2, height2], [width2, 0]], dtype=np.float32).reshape(-1, 1, 2)
    # Where image2's corners land after applying H.
    points2_ = cv2.perspectiveTransform(points2, H)
    points = np.concatenate((points1, points2_), axis=0)

    # Axis-aligned bounding box over all eight corners (truncated to int).
    [min_x, min_y] = np.int32(points.min(axis=0).ravel())
    [max_x, max_y] = np.int32(points.max(axis=0).ravel())

    # Shift so the bounding box's top-left corner becomes (0, 0).
    translation_dist = [-min_x, -min_y]

    H_translation = np.array([[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]])
    # Warp image2 with the translation-composed homography onto the canvas.
    output_img = cv2.warpPerspective(image2, H_translation.dot(H), (max_x - min_x, max_y - min_y))
    # Overwrite the region covered by image1 with the original pixels.
    output_img[translation_dist[1]:translation_dist[1] + height1,
    translation_dist[0]:translation_dist[0] + width1] = image1

    return output_img


def stitch_images(image_paths):
    """Sequentially stitch the images at *image_paths* into one panorama.

    The first image is the base; each following image is matched against
    the current result and warped onto it.
    """
    images = [read_image(p) for p in image_paths]

    panorama = images[0]

    for next_image in images[1:]:
        kp_base, des_base = detect_and_compute(panorama)
        kp_next, des_next = detect_and_compute(next_image)

        good = match_keypoints(des_base, des_next)
        homography = get_homography(kp_base, kp_next, good)

        panorama = warp_images(panorama, next_image, homography)

    return panorama


# Example file paths -- replace with the actual paths on your machine.
image_paths = ['E:/图像拼接/1.jpg', 'E:/图像拼接/2.jpg', 'E:/图像拼接/3.jpg']
stitched_image = stitch_images(image_paths)

# Display the stitched result until a key is pressed.
cv2.imshow('Stitched Image', stitched_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# coding: utf-8
# Stitch and blend multiple images (top/bottom/left/right).
# Environment: Python 3.7
import os
import cv2
import numpy as np
import pydicom as pd
from matplotlib import pyplot as plt

'''
    Stitch and blend multiple images.
    Images to stitch are read from the `path` directory defined below;
    stitched results are written to the `save` directory defined below.
'''

# Input directory for the images to stitch; `save` receives the results.
path = r'C:/Users/LWKJ24070009/Desktop/Image stitching'
save = r'C:/Users/LWKJ24070009/Desktop/Image stitching/waiting_stitch_image'

# Load the input photos (BGR). NOTE(review): cv2.imread returns None for
# missing files -- the cvtColor calls below would then fail; verify paths.
image1 = cv2.imread(path + '/' + '1.jpg')
image2 = cv2.imread(path + '/' + '2.jpg')
image3 = cv2.imread(path + '/' + '3.jpg')
#image4 = cv2.imread(path + '/' + '4.jpg')

# Grayscale versions used for feature detection and match drawing.
gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
gray3 = cv2.cvtColor(image3, cv2.COLOR_BGR2GRAY)
#gray4 = cv2.cvtColor(image4, cv2.COLOR_BGR2GRAY)


'''
    函数: read_Dcm
    功能: 读取Dcm文件 
'''


def read_Dcm(path):
    """Display every DICOM slice found one directory level below *path*.

    Each subdirectory of *path* is scanned; every file inside is read
    with pydicom and its pixel data shown via matplotlib.

    Returns:
        The last dataset read, or None when no file was found (the
        original left ``dcm`` unbound in that case, raising NameError).
    """
    fileList = os.listdir(path)
    totalNum = len(fileList)
    print('totalNum', totalNum)

    dcm = None
    for filename in fileList:
        print(filename)
        # os.path.join avoids requiring *path* to end with a separator,
        # which the original 'path + filename' concatenation assumed.
        subdir = os.path.join(path, filename)
        for file in os.listdir(subdir):
            # pydicom.dcmread replaces the deprecated/removed read_file.
            dcm = pd.dcmread(os.path.join(subdir, file))
            plt.imshow(dcm.pixel_array, 'gray')
            plt.title("dcm")
            plt.show()
    return dcm


'''
    函数: dcm_loadFileInformation
    功能: 获取DCM文件内tag信息
    参数: 传入的是DCM文件的路径
    内容: 分别获取患者的就诊ID、姓名、出生日期、性别等。
    返回值: 
            返回DCM值
'''


def dcm_loadFileInformation(path1):
    """Read the DICOM file at *path1*, print it, and return the dataset.

    A few identifying tags (patient / study / equipment) are collected
    into a local dict; note the full dataset is what is returned.
    """
    information = {}
    # pydicom.dcmread replaces the deprecated/removed read_file.
    dcm = pd.dcmread(path1)
    information['PatientID'] = dcm.PatientID
    information['PatientName'] = dcm.PatientName
    information['PatientBirthDate'] = dcm.PatientBirthDate
    information['PatientSex'] = dcm.PatientSex
    # Fixed attribute keywords: DICOM keywords are CamelCase, so the
    # original dcm.studyID / dcm.studyDate raised AttributeError, and the
    # standard keyword is 'InstitutionName' ('InstitionName' was a typo).
    # The dict keys are kept as-is to preserve the original interface.
    information['StudyID'] = dcm.StudyID
    information['studyDate'] = dcm.StudyDate
    information['StudyTime'] = dcm.StudyTime
    information['InstitionName'] = dcm.InstitutionName
    information['Manufacturer'] = dcm.Manufacturer
    print("The DCM file information:", dcm)
    return dcm


'''
    函数: dcm2JPG
    功能: 将DCM文件夹转换为 jpg格式文件
    参数: 传入DCM文件路径
    内容: 从指定路径里读取DCM文件,并将其转换为jpg格式图像
    参数: 返回jpg图像
'''


def dcm2JPG(path):
    """Read the DICOM file at *path* and return the dataset.

    NOTE(review): despite the name, no JPG conversion is performed yet --
    the caller receives the raw pydicom dataset. TODO: implement the
    actual conversion (normalize ``pixel_array`` and write with cv2).
    """
    # pydicom.dcmread replaces the deprecated/removed read_file.
    dcm = pd.dcmread(path)
    return dcm


'''
    函数: JPG2dcm
    功能: 将JPG文件夹转换为dcm格式文件
    参数: 传入JPG文件路径
    内容: 从指定路径里读取jpg文件,并将其转换为dcm格式图像;
          并将文件相应信息写入DCM
    参数: 返回dcm图像
'''


def JPG2dcm(newpath):
    # TODO: not implemented -- intended to read a JPG at *newpath*,
    # convert it to DICOM and write the associated metadata.
    # Currently a stub that returns None.
    return


'''
    函数: SiFt
    功能: 提取图像特征
    参数: image1,image2为传入图像
    内容: 
        1.调用detectAndCompute寻找特征点和描述子;   
        2.调用drawKeypoints绘制特征点
    返回值:
        Imag_1,Image_2  是 numpy.ndarray 类型的数组,它们保存的是在原始图像上绘制了特征点后的图像数据
        kp1,des_1,kp2,des_2分别表示两张图片的特征点和描述子
        kp包含了每个关键点的坐标、尺度、方向等信息。
        des描述关键点附近的局部图像特性,作为输入进行特征匹配
'''


def SIFT(image1, image2):
    """Detect SIFT features on both images.

    Returns:
        Two images with their keypoints drawn on them, followed by each
        image's (keypoints, descriptors) pair.
    """
    detector = cv2.SIFT_create()
    keypoints_a, descriptors_a = detector.detectAndCompute(image1, None)
    keypoints_b, descriptors_b = detector.detectAndCompute(image2, None)
    drawn_a = cv2.drawKeypoints(image1, keypoints_a, None)
    drawn_b = cv2.drawKeypoints(image2, keypoints_b, None)
    return drawn_a, drawn_b, keypoints_a, descriptors_a, keypoints_b, descriptors_b


'''
    函数: FlAnn快速最近邻搜索包
    功能: 图像特征匹配
    参数: 传入图像的描述子des_1、des_2
    内容:
        1.index_params配置所需算法
            字典类型:存储FLANN匹配器的索引参数。algorithm:指定使用的算法,FLANN_INDEX_KDTREE = 0 表示使用KD-Tree算法。
        2.search_params设定递归次数   
             字典类型:存储搜索参数。checks:表示搜索阶段递归遍历的树的次数。值越高,搜索越精确,但速度变慢。
        3.matches存储描述子之间的匹配结果。
            类型:list of DMatch objects;每个DMatch对象包含以下信息:
                                            queryIdx:查询描述子的索引(对应于des_1)。
                                            trainIdx:训练描述子的索引(对应于des_2)。
                                            imgIdx:训练图像的索引(当有多张图像时特定)。
                                            distance:特征点之间的距离(越小越匹配)。
        4.matchesMask:
            类型:list of list of int
            含义:用于存储匹配的掩码(mask),初始化为零。这个掩码可以在进一步筛选匹配点时用,如比率测试。每个元素是包含两个整数的列表,
            初始值为 [0, 0],对应于每个knnMatch结果的两个候选匹配点,0表示匹配结果无效。
            
    返回值:
         matchesc存放匹配对;
         matchesMask# 只需要绘制好的匹配项,因此创建一个掩码
'''


def flann(des_1, des_2):
    """FLANN (KD-tree) k=2 nearest-neighbor matching of two descriptor sets.

    Returns:
        matches: list of 2-element DMatch tuples from knnMatch.
        matchesMask: one ``[0, 0]`` entry per pair, for later ratio-test
            filtering / cv2.drawMatchesKnn masking.
    """
    FLANN_INDEX_KDTREE = 0
    # Fixed typo: the KD-tree parameter is 'trees' -- the original
    # 'tress=5' was silently ignored, leaving FLANN's default tree count.
    index_params = dict(algorithm=FLANN_INDEX_KDTREE,
                        trees=5)
    search_params = dict(checks=200)  # more checks = more accurate, slower
    matcher = cv2.FlannBasedMatcher(index_params, search_params)
    matches = matcher.knnMatch(des_1, des_2, k=2)
    matchesMask = [[0, 0] for _ in range(len(matches))]
    return matches, matchesMask


'''
    函数: ration_test
    功能: 比率测试 将不满足的最近邻匹配之间距离比率大于设定的阈值的匹配剔除.
    参数: 
         matches匹配对,kp特征点, matchesMask
    内容:
          1.当最小的两个距离的比率超过一定时,判断为不好的点,
                如果最近邻匹配距离与次近邻匹配距离的比率小于0.8,则认为这个匹配是可靠的。
          2.good[]用于存放匹配对数;设置matchesMask过滤匹配点
    返回值:
          good:存放匹配对数 ; list of cv2.DMatch objects
          pts1:存储通过比率测试的匹配点的坐标; list of tuple
          matchesMask:存放最优匹配点,更新后的匹配掩码,标记哪些匹配是通过测试的。
'''


def ration_test(matches, kp1, matchesMask):
    """Lowe's ratio test (threshold 0.8) over knnMatch pairs.

    Args:
        matches: list of (best, second-best) DMatch pairs.
        kp1: keypoints of the query image (indexed by ``queryIdx``).
        matchesMask: per-pair draw mask, updated in place.

    Returns:
        good: matches passing the test.
        pts1: query-image coordinates of those matches.
        matchesMask: the updated mask; ``[1, 0]`` marks a pair whose
            first neighbor should be drawn by cv2.drawMatchesKnn.
    """
    good = []  # matches that survive the ratio test
    pts1 = []  # their query-keypoint coordinates
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts1.append(kp1[m.queryIdx].pt)
            # Bug fix: the mask entry must be the 0/1 draw flags [1, 0].
            # The original stored the loop index ([i, 0]), so the first
            # good match (i == 0) was never flagged and later entries
            # held arbitrary integers instead of booleans.
            matchesMask[i] = [1, 0]
    return good, pts1, matchesMask


'''
    函 数: MakeBorder
    功 能: 创建一个模板,用于匹配
    参 数: 
          Bordermg1、Bordermg2为输入图像;
    内 容: 
          srcImg对img1进行边界扩充后的图像 ;
          testImg对img2进行边界扩充后的图像 ;
    返回值:
          返回srcImg与TestImg
'''

def MakeBorder(BorderImg1, Bordermg2):
    """Pad both images with a constant black border so the later stitch
    has room to grow (top 200, bottom 800, left/right 200 pixels).

    Returns the two padded images in the same order as the arguments.
    """
    top, bot, left, right = 200, 800, 200, 200

    def _pad(img):
        # Extend the canvas on every side, filling new pixels with black.
        return cv2.copyMakeBorder(img, top, bot, left, right,
                                  cv2.BORDER_CONSTANT, value=(0, 0, 0))

    return _pad(BorderImg1), _pad(Bordermg2)


'''
    函数: Draw_Tool
    功能: draw_params给特征点和匹配的线定义颜色,
         draw_Img连接匹配点;
    内容: draw_params给特征点和匹配的线定义颜色,
         draw_Img连接匹配点;
    参数: 
          matchesMask, 存放最优匹配点
          gray1,  gray2,待匹配图像
          kp1, kp2, 分别表示图像特征点
          matches,保存图像匹配对数
          matchColor:用于绘制匹配点连线的颜色,
          singlePointColor:用于绘制未匹配关键点的颜色,
          flags:绘图标志,这里设为 0,表示绘制默认的关键点和匹配连线的样式。
    返回值:
          draw_Img,特征匹配图像
'''


def Draw_Tool(matchesMask, gray1, kp1, gray2, kp2, matches):
    """Render the knn matches between two images and save the picture.

    Match lines are green, unmatched keypoints red; only mask-flagged
    pairs are drawn. The visualization is written into the module-level
    `save` directory under an auto-incremented name, then returned.
    """
    options = dict(
        matchColor=(0, 255, 0),
        singlePointColor=(255, 0, 0),
        matchesMask=matchesMask,
        flags=0,
    )
    draw_Img = cv2.drawMatchesKnn(gray1, kp1, gray2, kp2, matches, None, **options)

    # Pick a non-clashing file name and persist the visualization.
    target = os.path.join(save, get_next_filename(save, "draw_Img"))
    cv2.imwrite(target, draw_Img)

    return draw_Img


def get_next_filename(directory, base_filename, extension=".jpg"):
    """Return the first '<base>_<n><ext>' name (n = 1, 2, ...) that does
    not yet exist inside *directory*."""
    counter = 1
    candidate = f"{base_filename}_{counter}{extension}"
    while os.path.exists(os.path.join(directory, candidate)):
        counter += 1
        candidate = f"{base_filename}_{counter}{extension}"
    return candidate

'''
    函数: StitchImg
    功能: 实现图像竖向、横向拼接
    内容: 
    warpImg:主要用于展示 testImg 经过透视变换后的效果,确保 testImg 正确对齐到 srcImg 的坐标系中。
    warpImg1:主要用于对 srcImg 进行透视变换并扩展其尺寸,提供一张更大的图像来容纳最终拼接结果。
    参数: 
        srcImg,与上同;testImg,与上同
        good,存放好的匹配点
        kp1, kp2图像的特征点
'''

def StitchImg(srcImg, testImg, good, kp1, kp2):
    """Blend testImg into srcImg's frame via the homography estimated
    from *good* matches; display intermediates and save the result.

    Returns:
        The blended image converted BGR->RGB, or None when there are
        not enough matches to estimate a homography (the original left
        ``res_RGB`` unbound in that branch, raising NameError on return).
    """
    print("===========即将进行拼接!==============\n")
    rows, cols = srcImg.shape[:2]
    print("srcImg.shape[:2]", srcImg.shape[:2])
    rows1, cols1 = testImg.shape[:2]
    print("testImg.shape[:2]", testImg.shape[:2])
    MIN_MATCH_COUNT = 10
    res_RGB = None  # bug fix: defined on every path so the return is safe
    if len(good) > MIN_MATCH_COUNT:
        print("len(good)", len(good))
        # Query-image keypoint coordinates.
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        # Train (template) image keypoint coordinates.
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        # M maps img1 coordinates into img2 coordinates.
        M, mask = cv2.findHomography(src_pts, dst_pts,
                                     cv2.RANSAC, 5.0)
        # WARP_INVERSE_MAP applies the inverse of M, pulling testImg back
        # into srcImg's coordinate frame on a same-sized canvas.
        warpImg = cv2.warpPerspective(testImg, np.array(M),
                                      (testImg.shape[1], testImg.shape[0]),
                                      flags=cv2.WARP_INVERSE_MAP)
        plt.imshow(warpImg, )
        plt.title("warpImg")
        plt.show()
        # Larger canvas view of srcImg under the same inverse warp, used
        # only for visual inspection.
        warpImg1 = cv2.warpPerspective(srcImg, np.array(M),
                                       (srcImg.shape[1] + 1500, srcImg.shape[0] + 1500),
                                       flags=cv2.WARP_INVERSE_MAP)
        plt.imshow(warpImg1)
        plt.title("warpImg1")
        plt.show()
        # Leftmost overlap column. (The original loop assigned the first
        # column and broke immediately, i.e. always 0; made explicit.)
        left = 0
        print("开始重叠的左端位置是:第" + str(left) + "列!")
        # Rightmost overlap column: scan from the right for the first
        # column where both images have content.
        right = cols - 1  # bug fix: fallback so 'right' is never unbound
        for col in range(cols - 1, 0, -1):
            if srcImg[:, col].any() and warpImg[:, col].any():
                right = col
                print("right", right)
                break
        print("开始重叠的右端位置是:第" + str(right) + "列!")
        # Black canvas that receives the blended result.
        res = np.zeros([rows, cols, 3], np.uint8)
        plt.imshow(res)
        plt.title("res")
        plt.show()
        print("res_size", res.shape[:2])
        print("rows", rows)
        print("cols", cols)
        # Fill the canvas pixel by pixel from srcImg / warpImg.
        for row in range(0, rows):
            for col in range(0, cols):
                if not srcImg[row, col].any():
                    # No source pixel here: take the warped image.
                    res[row, col] = warpImg[row, col]
                elif not warpImg[row, col].any():
                    # No warped pixel here: take the source image.
                    res[row, col] = srcImg[row, col]
                else:
                    # Both have content: linear alpha blend weighted by
                    # position across the overlap region.
                    srcImgLen = float(abs(col - left))
                    testImgLen = float(abs(col - right))
                    alpha = srcImgLen / (srcImgLen + testImgLen)
                    res[row, col] = np.clip(srcImg[row, col]
                                            * (1 - alpha)
                                            + warpImg[row, col]
                                            * alpha
                                            , 0, 255)
        res_RGB = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        plt.figure()
        plt.imshow(res_RGB)
        plt.title("res_RGB")
        plt.show()
        # Pick a non-clashing file name and save the stitched image.
        save_filename = get_next_filename(save, "stitch_Img")
        save_path = os.path.join(save, save_filename)
        cv2.imwrite(save_path, res_RGB)

    else:
        print("Not enough matches are found - {}/{}".format(len(good),
                                                            MIN_MATCH_COUNT))
    print("=====================拼接完成!===========================\n")
    return res_RGB


# Main program entry point
# def main():

if __name__ == '__main__':
    print("=============主函数执行!!===========\n")
    # First stitch: image1 + image2.
    print("第一次拼接!!!!!")
    img1, img2, k1, des1, k2, des2 = SIFT(image1, image2)
    matches, matchesMask1 = flann(des1, des2)
    good, pts, matchesMask2 = ration_test(matches, k1, matchesMask1)
    srcImg, testImg = MakeBorder(image1, image2)
    draw_Img = Draw_Tool(matchesMask2, gray1, k1, gray2, k2, matches)
    res1 = StitchImg(srcImg, testImg, good, k1, k2)

    # Second stitch: the previous result res1 + image3.
    print("第二次拼接!!!!!")
    img_res1, img3, k_res1, des_res1, k3, des3 = SIFT(res1, image3)
    matches13, matchesMask13 = flann(des_res1, des3)
    good2, pts2, matchesMask3 = ration_test(matches13, k_res1, matchesMask13)
    srcImg1, testImg1 = MakeBorder(res1, image3)
    draw_Img = Draw_Tool(matchesMask3, res1, k_res1, gray3, k3, matches13)
    res2 = StitchImg(srcImg1, testImg1, good2, k_res1, k3)

# if __name__ == '__main__':
#     print("=============主函数执行!!===========\n")
#     # path = r"G:/DiCom/"
#     # dcm = read_Dcm(path)
#     print("第一次拼接!!!!!")
#     img1, img2, k1, des1, k2, des2 = SIFT(image1, image2)
#     matches, matchesMask1 = flann(des1, des2)
#     good, pts, matchesMask2 = ration_test(matches, k1, matchesMask1)
#     srcImg, testImg = MakeBorder(image1, image2) #创建边界
#     draw_Img = Draw_Tool(matchesMask2, gray1, k1, gray2, k2, matches) #绘制匹配点
#     res1 = StitchImg(srcImg, testImg, good, k1, k2) #拼接
#     print("第二次拼接!!!!!")
#     img3, img4, k3, des3, k4, des4 = SIFT(image3, image4)
#     matches34, matchesMask34 = flann(des3, des4)
#     good1, pts1, matchesMask5 = ration_test(matches34, k3, matchesMask34)
#     srcImg1, testImg1 = MakeBorder(image3, image4)
#     draw_Img1 = Draw_Tool(matchesMask5, gray3, k3, gray4, k4, matches34)
#     res2 = StitchImg(srcImg1, testImg1, good1, k3, k4)
#     print("第三次拼接!!!!!")
#     img5, img6, k5, des5, k6, des6 = SIFT(res1, res2)
#     matches56, matchesMask56 = flann(des5, des6)
#     good2, pts2, matchesMask6 = ration_test(matches56, k5, matchesMask56)
#     srcImg2, testImg2 = MakeBorder(res1, res2)
#     draw_Img2 = Draw_Tool(matchesMask56, res1, k5, res2, k6, matches56)
#     res3 = StitchImg(srcImg2, testImg2, good2, k5, k6)

# (removed: non-code web-page residue accidentally pasted from the scraped source)