基于pyQT5的对齐相减,旋转,增强,分割程序

本文内容包括,特征匹配,单应性矩阵,图像对齐,图像相减,图像旋转,图像增强,大津法图像分割,自适应阈值图像分割

一.界面展示

二.算法讲解

2.1对齐相减

原理:在一个图像中检测到一组特征点,并与另一张图像中的特征点相匹配。然后根据这些匹配的特征点计算出一个转换规则,从而将一个图像映射到另一个图像上。

详细流程

1.检测特征点:为两张图检测ORB特征点。为了计算单应性矩阵4个就够了,但是一般会检测到成百上千的特征点。可以使用参数MAX_FEATURES来控制检测的特征点的数量。检测特征点并计算描述子的函数是detectAndCompute。
2.特征匹配:找到两图中匹配的特征点,并按照匹配度排列,保留最匹配的一小部分。然后把匹配的特征点画出来并保存图片到磁盘。我使用Hamming来度量两个特征点描述子的相似度。注意,有很多错误匹配的特征点,所以下一步要用一个健壮的算法来计算单应性矩阵。
3.计算单应性矩阵:上一步产生的匹配的特征点不是100%正确的,就算只有20~30%的匹配是正确的也不罕见。
findHomography函数使用一种被称为随机抽样一致算法(Random Sample Consensus, RANSAC)的技术,在大量匹配错误的情况下仍能计算出单应性矩阵。
4.扭转图片:有了精确的单应性矩阵,就可以把一张图片的所有像素映射到另一个图片。warpPerspective函数用来完成这个功能。

2.2图像旋转

2.3图像增强

2.4大津法分割

2.5自适应阈值分割

源代码:

import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QFileDialog, QVBoxLayout, QHBoxLayout, QLineEdit
from PyQt5.QtGui import QPixmap
from PIL import Image, ImageChops, ImageEnhance, ImageFilter, ImageOps
from PIL import Image, ImageFilter, ImageEnhance
import cv2
import numpy as np
import math


class ImageProcessor(QWidget):
    """Main window for the image tool.

    Provides: ORB-based alignment + subtraction of two images, rotation by a
    user-supplied angle, contrast/brightness/sharpness enhancement, Otsu
    thresholding, and adaptive thresholding.
    """

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the widget layout and wire button callbacks."""
        self.layout = QVBoxLayout()

        # Row 1: browse / show image 1.
        self.layout1 = QHBoxLayout()
        self.btn_browse1 = QPushButton('浏览图片1', self)
        self.btn_browse1.clicked.connect(lambda: self.browse_image(1))
        self.btn_show1 = QPushButton('显示图片1', self)
        self.btn_show1.clicked.connect(lambda: self.show_image(1))
        self.layout1.addWidget(self.btn_browse1)
        self.layout1.addWidget(self.btn_show1)
        self.layout.addLayout(self.layout1)

        # Row 2: browse / show image 2.
        self.layout2 = QHBoxLayout()
        self.btn_browse2 = QPushButton('浏览图片2', self)
        self.btn_browse2.clicked.connect(lambda: self.browse_image(2))
        self.btn_show2 = QPushButton('显示图片2', self)
        self.btn_show2.clicked.connect(lambda: self.show_image(2))
        self.layout2.addWidget(self.btn_browse2)
        self.layout2.addWidget(self.btn_show2)
        self.layout.addLayout(self.layout2)

        # Row 3: rotation-angle input.
        self.layout3 = QHBoxLayout()
        self.encryption_label = QLabel("输入需要旋转的角度:")
        self.rotate_input = QLineEdit(self)
        # BUGFIX: the label was created but never added to the layout,
        # so it was invisible next to the input box.
        self.layout3.addWidget(self.encryption_label)
        self.layout3.addWidget(self.rotate_input)
        self.layout.addLayout(self.layout3)

        # Row 4: processing buttons.
        self.layout4 = QHBoxLayout()
        self.btn_subtract = QPushButton('图像相减', self)
        self.btn_subtract.clicked.connect(self.subtract_images)
        self.btn_rotate = QPushButton('旋转', self)
        self.btn_rotate.clicked.connect(self.rotate_image)
        self.btn_enhance = QPushButton('增强', self)
        self.btn_enhance.clicked.connect(self.enhance_image)
        self.btn_segment_image_otsu = QPushButton('大津法分割', self)
        self.btn_segment_image_otsu.clicked.connect(self.segment_image_otsu)
        self.btn_segment_image_adaptive_threshold = QPushButton('自适应分割', self)
        self.btn_segment_image_adaptive_threshold.clicked.connect(self.segment_image_adaptive_threshold)
        self.layout4.addWidget(self.btn_subtract)
        self.layout4.addWidget(self.btn_rotate)
        self.layout4.addWidget(self.btn_enhance)
        self.layout4.addWidget(self.btn_segment_image_otsu)
        self.layout4.addWidget(self.btn_segment_image_adaptive_threshold)
        self.layout.addLayout(self.layout4)

        # Image display area.
        self.image_label = QLabel(self)
        self.layout.addWidget(self.image_label)

        self.setLayout(self.layout)
        self.setGeometry(300, 300, 350, 300)
        self.setWindowTitle('Image Processor')
        self.show()

        # Selected file paths for image 1 and image 2 ('' = not chosen yet).
        self.image_paths = ['', '']

    def browse_image(self, image_number):
        """Open a file dialog and remember the chosen path for slot 1 or 2."""
        fname, _ = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        if fname:
            self.image_paths[image_number - 1] = fname

    def show_image(self, image_number):
        """Display the selected image (slot 1 or 2) in the preview label."""
        if self.image_paths[image_number - 1]:
            pixmap = QPixmap(self.image_paths[image_number - 1])
            self.image_label.setPixmap(pixmap)
            # Resize the label to fit the image.
            self.image_label.resize(pixmap.width(), pixmap.height())

    def subtract_images(self):
        """Align image 1 onto image 2 via ORB + homography, then show |diff|.

        Pipeline: grayscale -> ORB keypoints/descriptors -> Hamming
        brute-force matching -> RANSAC homography -> perspective warp ->
        absolute difference.
        """
        if not all(self.image_paths):
            return

        image1 = cv2.imread(self.image_paths[0])
        image2 = cv2.imread(self.image_paths[1])
        if image1 is None or image2 is None:
            print("Error: Unable to load images.")
            return

        # Feature detection works on single-channel images.
        gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)

        # ORB detection; MAX_FEATURES caps the number of keypoints.
        MAX_FEATURES = 500
        orb = cv2.ORB_create(MAX_FEATURES)
        kp1, des1 = orb.detectAndCompute(gray1, None)
        kp2, des2 = orb.detectAndCompute(gray2, None)
        if des1 is None or des2 is None:
            print("Error: No descriptors found.")
            return

        # Hamming distance is the appropriate metric for binary ORB
        # descriptors; sort so the best matches come first.
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
        matches = matcher.match(des1, des2)
        matches = sorted(matches, key=lambda x: x.distance)

        # A homography needs at least 4 point correspondences.
        if len(matches) < 4:
            print("Error: Not enough matches.")
            return

        # RANSAC tolerates the (many) incorrect matches.
        src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if H is None:
            print("Error: Unable to compute homography.")
            return

        # Warp image 1 into image 2's frame, then take the absolute
        # per-pixel difference.
        height, width = image2.shape[:2]
        warped_image1 = cv2.warpPerspective(image1, H, (width, height))
        result = cv2.absdiff(warped_image1, image2)

        # NOTE(review): cv2.waitKey(0) blocks the Qt event loop until a key
        # is pressed in the OpenCV window.
        cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
        cv2.imshow("Result", result)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        # Also show via PIL (external viewer); OpenCV is BGR, PIL wants RGB.
        result_pil = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
        result_pil.show()

    def rotate_image(self):
        """Rotate image 1 by the angle typed in the input box and show it."""
        if not self.image_paths[0]:
            return
        # BUGFIX: an empty or non-numeric entry used to raise ValueError;
        # float() also accepts fractional angles (backward compatible).
        try:
            angle = float(self.rotate_input.text())
        except ValueError:
            print("Error: rotation angle must be a number.")
            return

        image = Image.open(self.image_paths[0])
        original_width, original_height = image.size

        # Size of the axis-aligned bounding box of the rotated image.
        angle_rad = math.radians(angle)
        new_width = abs(original_width * math.cos(angle_rad)) + abs(original_height * math.sin(angle_rad))
        new_height = abs(original_height * math.cos(angle_rad)) + abs(original_width * math.sin(angle_rad))

        # Paste the original into the centre of a transparent canvas big
        # enough for the rotated bounding box, then rotate the canvas about
        # its centre so no corner is clipped.
        result = Image.new("RGBA", (int(new_width), int(new_height)), (0, 0, 0, 0))
        result.paste(image, (int((new_width - original_width) / 2), int((new_height - original_height) / 2)))
        result = result.rotate(angle)
        result.show()

    def enhance_image(self):
        """Enhance image 1: equalize, then brighten, boost contrast, sharpen."""
        if self.image_paths[0]:
            image = Image.open(self.image_paths[0]).convert('L')  # grayscale

            # Histogram equalization to spread out intensity values.
            result = ImageOps.equalize(image)

            # Brightness +20%.
            enhancer = ImageEnhance.Brightness(result)
            result = enhancer.enhance(1.2)

            # Contrast +30%.
            enhancer = ImageEnhance.Contrast(result)
            result = enhancer.enhance(1.3)

            # Sharpness x2.
            enhancer = ImageEnhance.Sharpness(result)
            result = enhancer.enhance(2.0)

            result.show()

    def segment_image_otsu(self):
        """Binarize image 1 with a hand-rolled Otsu threshold and show it."""
        if self.image_paths[0]:
            image = Image.open(self.image_paths[0])

            def otsu_threshold(image):
                # Otsu's method: pick the threshold that maximizes the
                # between-class variance of the grayscale histogram.
                grayscale = np.array(image.convert('L'))
                histogram, bin_edges = np.histogram(grayscale, bins=256, range=(0, 256))
                total = grayscale.size
                sum_total = np.dot(bin_edges[:-1], histogram)
                sumB, wB, maximum = 0.0, 0.0, 0.0
                threshold = 0
                for i in range(256):
                    wB += histogram[i]          # background weight
                    if wB == 0:
                        continue
                    wF = total - wB             # foreground weight
                    if wF == 0:
                        break
                    sumB += i * histogram[i]
                    meanB = sumB / wB
                    meanF = (sum_total - sumB) / wF
                    varBetween = wB * wF * (meanB - meanF) ** 2
                    if varBetween > maximum:
                        maximum = varBetween
                        threshold = i
                return threshold

            threshold = otsu_threshold(image)
            segmented_image = image.convert('L').point(lambda x: 0 if x < threshold else 255, '1')
            segmented_image.show()

    def segment_image_adaptive_threshold(self):
        """Binarize image 1 with Gaussian adaptive thresholding and show it."""
        if self.image_paths[0]:
            image_pil = Image.open(self.image_paths[0])
            # BUGFIX: PIL images are RGB (not BGR) and may also be 'L' or
            # 'RGBA', which would make cv2.COLOR_BGR2GRAY wrong or crash.
            # Converting through PIL handles every mode correctly.
            grayscale = np.array(image_pil.convert('L'))
            # 25 = neighbourhood (block) size, C = constant subtracted from
            # the local Gaussian-weighted mean.
            segmented_image = cv2.adaptiveThreshold(grayscale, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                    cv2.THRESH_BINARY, 25, C=10)
            Image.fromarray(segmented_image).show()

if __name__ == '__main__':
    # Launch the Qt application and block until the window is closed,
    # propagating the application's exit status.
    application = QApplication(sys.argv)
    window = ImageProcessor()
    sys.exit(application.exec_())
  • 2
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值