OpenCV Notes

These notes walk through image processing with OpenCV: filtering, edge detection, feature extraction (e.g. SIFT, SURF) and feature matching, with code examples throughout.

1、2023/9/7

import cv2
import numpy as np

if __name__ == '__main__':
    print("Hello World!")
    src = cv2.imread('pcb.png', 0)
    print(src.size)  # 360960
    print(src.shape)  # (480, 752) = (h, w)

    crop = src[:128, :64]
    print(crop.shape)  # (128, 64)

    # filter
    dst_blur = cv2.blur(src, (5, 5))  # k_size --- kernel size
    dst_median = cv2.medianBlur(src, 5)  # k_size = 3, 5, 7...
    dst_box = cv2.boxFilter(src, -1, (5, 5))  # same as blur  depth---8U, 8S, 32F...
    dst_bilateral = cv2.bilateralFilter(src, 5, 150, 150)  # bilateral filter
    dst_gauss = cv2.GaussianBlur(src, (5, 5), 1)  # gauss filter  k_size  sigma
    kernel = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]], dtype='float32') / 16  # 3x3 Gaussian (binomial) kernel, sums to 1
    dst_filter = cv2.filter2D(src, -1, kernel)  # convolve

    # edge detect
    edges = cv2.Canny(src, 100, 200)  # edges --- binary image
    edges_sch = cv2.Scharr(src, -1, 0, 1)  # Scharr y-derivative (dx=0, dy=1)
    edges_laplacian = cv2.Laplacian(src, -1)  # laplacian --- second order
    edges_sobel_x = cv2.Sobel(dst_median, cv2.CV_32F, 1, 0, ksize=1)  # sobel  k_size=1,3,5,...
    edges_sobel_y = cv2.Sobel(dst_median, cv2.CV_32F, 0, 1, ksize=1)  # sobel  k_size=1,3,5,...

    mag, angle = cv2.cartToPolar(edges_sobel_x, edges_sobel_y, angleInDegrees=True)  # magnitude  angle
    mag = cv2.normalize(mag, None, 0.0, 1.0, norm_type=cv2.NORM_MINMAX)  # scale to 0~1
    minV, maxV, minLoc, maxLoc = cv2.minMaxLoc(mag)
    print(minV, " ", maxV)

    # HOG
    hog = cv2.HOGDescriptor()
    descriptors = hog.compute(crop, (8, 8), (0, 0))
    print(descriptors.shape)  # (3780,)  3780=7*15*4*9
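    # with the default HOGDescriptor (winSize 64x128, blockSize 16x16, blockStride 8x8,
    # cellSize 8x8, 9 bins) one 64x128 window holds 7x15 block positions with 2x2 cells
    # each, so the descriptor length is 7 * 15 * 4 * 9 = 3780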
    print(descriptors[:36])

    """
    # harris
    dst1 = cv2.cornerHarris(src, 3, 23, 0.04)

    # sift
    sift = cv2.SIFT_create(2000)
    key_points1, descriptor1 = sift.detectAndCompute(src, None)

    # surf (requires opencv-contrib built with the nonfree modules)
    surf = cv2.xfeatures2d.SURF_create(20000)
    key_points2, descriptor2 = surf.detectAndCompute(src, None)

    # fast
    fast = cv2.FastFeatureDetector_create(threshold=50)
    key_points3 = fast.detect(src, None)

    # orb
    orb = cv2.ORB_create(128)
    key_points4, descriptors4 = orb.detectAndCompute(src, None)

    # match
    cv2.BFMatcher_create()  # Brute-force
    cv2.FlannBasedMatcher_create()  # Flann
    """

    cv2.imshow('src', dst_median)
    cv2.imshow('dst', mag)
    cv2.waitKey(0)
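
The commented-out block above constructs BFMatcher / FlannBasedMatcher but never calls them. As a rough sketch of the brute-force path (assuming the same pcb.png plus a hypothetical second view pcb_rot.png that is not part of these notes), ORB descriptors can be matched with Hamming distance like this:

import cv2

img_a = cv2.imread('pcb.png', 0)
img_b = cv2.imread('pcb_rot.png', 0)  # hypothetical second view, not in the original notes

orb = cv2.ORB_create(500)
kp_a, des_a = orb.detectAndCompute(img_a, None)
kp_b, des_b = orb.detectAndCompute(img_b, None)

# ORB descriptors are binary strings, so use Hamming distance; crossCheck keeps mutual best matches only
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des_a, des_b), key=lambda m: m.distance)

out = cv2.drawMatches(img_a, kp_a, img_b, kp_b, matches[:30], None, flags=2)
cv2.imshow('orb_bf_matches', out)
cv2.waitKey(0)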

2、2023/9/8

import cv2
import numpy as np

if __name__ == '__main__':
    img1 = cv2.imread('bga_r_01.png')  # queryImage
    img2 = cv2.imread('bga_r_06.png')  # trainImage
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # sift features
    sift = cv2.SIFT_create(100)
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)
    print('kp1 size = ', len(kp1))
    print('kp2 size = ', len(kp2))

    # draw the keypoints on the images
    flag = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
    img1 = cv2.drawKeypoints(image=img1, keypoints=kp1, outImage=img1, color=(255, 0, 255), flags=flag)
    img2 = cv2.drawKeypoints(image=img2, keypoints=kp2, outImage=img2, color=(255, 0, 255), flags=flag)

    # show the images
    cv2.imshow('sift_keypoints1', img1)
    cv2.imshow('sift_keypoints2', img2)
    cv2.waitKey(100)

    FLANN_INDEX_KDTREE = 1  # kd-tree index (0 would be FLANN's linear index)
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    searchParams = dict(checks=50)
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)
    matches = flann.knnMatch(des1, des2, k=2)
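    # k=2 returns the two nearest train descriptors per query descriptor,
    # which is exactly what the ratio test below needs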
    print('matches size = ', len(matches))  # 101 matched pairs

    # Lowe's ratio test --- accept a match only if the nearest distance divided by the second-nearest distance is below a threshold T
    goodMatches = []
    for m, n in matches:
        if m.distance / n.distance < 0.9:
            goodMatches.append(m)
    print('goodMatches size = ', len(goodMatches))  # 75 matched pairs

    if len(goodMatches) > 10:
        # collect the coordinates of the matched keypoints
        src_pts = np.float32([kp1[m.queryIdx].pt for m in goodMatches]).reshape(-1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in goodMatches]).reshape(-1, 2)
        print('src_pts:', len(src_pts), src_pts[0])  # 75
        print('dst_pts:', len(dst_pts), dst_pts[0])  # 75
        # estimate the homography
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)  # RANSAC reprojection threshold = 5.0
        matchesMask = mask.ravel().tolist()  # numpy.flatten() and numpy.ravel()
        indices = np.where(np.array(matchesMask) == 1)
        print('matchesMask:', len(matchesMask))  # 75
        print('mask:', indices[0])  # [23 25 27 31 34 35 40 41 48 50 54 55 56 58]
        # project a reference rectangle from img1 into img2 via the homography
        pts = np.float32([[217, 221], [434, 222], [432, 440], [216, 437]]).reshape(-1, 1, 2)  # (4,1,2)
        dst = cv2.perspectiveTransform(pts, M)  # (4,1,2)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, (0, 255, 0), 2, cv2.LINE_AA)  # plot
    else:
        print("Not enough matches are found - %d/%d" % (len(goodMatches), 10))
        matchesMask = None

    draw_params = dict(matchColor=(0, 255, 0), singlePointColor=None, matchesMask=matchesMask, flags=2)
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, goodMatches, None, **draw_params)
    cv2.imshow('matches', img3)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
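
The homography above is only used to project a rectangle. As a minimal follow-up sketch (not in the original notes; it assumes the img1, img2 and M variables from the script above), the query image can be warped onto the train image to eyeball the alignment:

def show_alignment(img_query, img_train, H):
    # warp the query image into the train image's frame and blend the two 50/50
    h, w = img_train.shape[:2]
    warped = cv2.warpPerspective(img_query, H, (w, h))
    overlay = cv2.addWeighted(img_train, 0.5, warped, 0.5, 0)
    cv2.imshow('warped_overlay', overlay)
    cv2.waitKey(0)

# e.g. call show_alignment(img1, img2, M) right after findHomography succeeds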
