ShapeMatching Using Fourier Descriptor

 

github地址:https://github.com/AliceLeBrigant/ShapeMatching

这个代码上来就画轮廓,根据轮廓查找相似的,背景必须纯色,商用价值不高。

import numpy as np
import cv2

print("Shape Matching Using Fourier Descriptor")

# Maximum Euclidean distance between Fourier descriptors for a "good" match
distThreshold = 0.06
# Mouse-down coordinates of the template selection drag
ix, iy = -1, -1
# Template ROI as (x, y, width, height)
rect = (0, 0, 1, 1)

# When True, the template region is selected with the mouse; otherwise a
# hard-coded default region is used (see the main loop below).
manually = True
temSeleteFlag = False   # a mouse drag is currently in progress
temReadyFlag = False    # a template rectangle has been selected
temConfirmFlag = False  # user confirmed the template (press 'y')
matchOverFlag = False   # matching has already run once

# Complex-number contour vector of the template
templeteComVector = []
# Complex-number contour vectors of all candidate (sample) contours
sampleComVectors = []
# Sample contours, same order as sampleComVectors
sampleContours = []


# Manually select template by mouse, On/Off by the `manually` flag
def selectTemplete(event, x, y, flags, param):
    """OpenCV mouse callback: drag a rectangle to choose the template ROI.

    Stores the selection in the module-level `rect` (x, y, w, h), draws a
    blue rectangle on `imgOri`, and sets `temReadyFlag` once a non-empty
    region has been selected.
    """
    global rect, temSeleteFlag, temReadyFlag, ix, iy

    if event == cv2.EVENT_LBUTTONDOWN and temReadyFlag == False:
        # Start of the drag: remember the anchor corner
        temSeleteFlag = True
        ix, iy = x, y

    elif event == cv2.EVENT_LBUTTONUP:
        if temReadyFlag == False and temSeleteFlag == True:
            # rect is the selected template ROI (normalized so w, h >= 0)
            rect = (min(ix, x), min(iy, y), abs(ix - x), abs(iy - y))
            # Draw a blue rectangle after selection
            cv2.rectangle(imgOri, (ix, iy), (x, y), (255, 0, 0), 2)
            # Bug fix: only mark the template ready after a drag that produced
            # a non-empty region.  Previously ANY button-up (even a plain
            # click with no preceding drag) set temReadyFlag, permanently
            # locking in the default 1x1 rect.
            if rect[2] > 0 and rect[3] > 0:
                temReadyFlag = True
        temSeleteFlag = False


# Main contour-extraction routine
def getContours(img):
    """Return the external contours of the dark shapes in a BGR image.

    Assumes a light (white-paper) background with dark shapes on it.
    """
    imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Threshold: white paper (background) -> 255, dark shapes -> 0
    retvalth, imgthreshold = cv2.threshold(imgray, 50, 255, cv2.THRESH_BINARY)
    # Invert so shapes are white on black, as findContours expects
    imgthresholdNot = cv2.bitwise_not(imgthreshold)
    # Dilate so thin/broken strokes close into a single outer loop
    kernel = np.ones((5, 5), np.uint8)
    imgdilation = cv2.dilate(imgthresholdNot, kernel, iterations=2)
    # Must use RETR_EXTERNAL (outer contours only) and CHAIN_APPROX_NONE
    # (keep every boundary point).  cv2.findContours returns
    # (image, contours, hierarchy) in OpenCV 3.x but (contours, hierarchy)
    # in OpenCV 4.x; indexing [-2] picks the contour list on both versions,
    # whereas the old 3-value unpacking raises ValueError under OpenCV 4.
    contours = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_NONE)[-2]

    return contours


# Build the complex vector of the template contour
def getTempleteCV():
    """Fill `templeteComVector` with the boundary points of the template
    ROI, encoded as complex numbers relative to each contour's bounding box
    (so the left/top boundary starts at 0)."""
    # Region selected by mouse, or the default rect
    templeteROI = imgOricpy[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]]
    # Automatically find the template contour(s)
    for contour in getContours(templeteROI):
        x, y, w, h = cv2.boundingRect(contour)
        templeteComVector.extend(
            complex(pt[0][0] - x, pt[0][1] - y) for pt in contour
        )


# Build complex vectors for every candidate contour in the full image
def getSampleCV():
    """Fill `sampleComVectors` and `sampleContours` (kept in the same
    order) from all external contours of the full image, and draw a grey
    bounding box around each candidate on `imgOri`."""
    for contour in getContours(imgOricpy):
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(imgOri, (x, y), (x + w, y + h), (100, 100, 100), 1)

        # Boundary points as complex numbers, relative to the bounding box
        vector = [complex(pt[0][0] - x, pt[0][1] - y) for pt in contour]
        sampleComVectors.append(vector)
        sampleContours.append(contour)


# Fourier transform of the template's complex vector
def getempleteFD():
    """Return the Fourier descriptor (DFT) of the template contour vector."""
    spectrum = np.fft.fft(templeteComVector)
    return spectrum


# Fourier transforms of all sample complex vectors
def getsampleFDs():
    """Return the Fourier descriptors of every sample contour, in the same
    order as `sampleComVectors`."""
    return [np.fft.fft(vector) for vector in sampleComVectors]


# Make the Fourier descriptor invariant to rotation and start point
def rotataionInvariant(fourierDesc):
    """Replace every coefficient with its magnitude, in place, and return
    the descriptor.  Dropping the phase removes rotation and start-point
    effects."""
    i = 0
    while i < len(fourierDesc):
        fourierDesc[i] = np.absolute(fourierDesc[i])
        i += 1

    return fourierDesc


# Make the Fourier descriptor invariant to scale
def scaleInvariant(fourierDesc):
    """Divide every coefficient by the first one, in place, and return the
    descriptor (the first entry becomes exactly 1)."""
    reference = fourierDesc[0]

    for i in range(len(fourierDesc)):
        fourierDesc[i] = fourierDesc[i] / reference

    return fourierDesc


# Make the Fourier descriptor invariant to translation
def transInvariant(fourierDesc):
    """Return the descriptor without its first (position-dependent)
    coefficient."""
    return fourierDesc[1:]


# Keep only the lowest-frequency Fourier coefficients
def getLowFreqFDs(fourierDesc, numCoeffs=5):
    """Return the first `numCoeffs` coefficients of `fourierDesc`.

    np.fft orders frequencies as (0, 0.1, 0.2, ..., -0.2, -0.1), and
    transInvariant() has already dropped the 0-frequency term, so the
    leading entries here are the lowest positive frequencies.  The cutoff
    used to be hard-coded to 5; it is now a backward-compatible parameter.
    """
    return fourierDesc[:numCoeffs]


# Compose all invariance steps into the final matching descriptor
def finalFD(fourierDesc):
    """Apply rotation/start-point, scale and translation invariance, then
    truncate to the low-frequency coefficients used for matching."""
    for transform in (rotataionInvariant, scaleInvariant,
                      transInvariant, getLowFreqFDs):
        fourierDesc = transform(fourierDesc)

    return fourierDesc


# Core match function
def match(tpFD, spFDs):
    """Compare every sample descriptor against the template descriptor.

    Writes the Euclidean distance above each sample's bounding box on
    `imgOri`, and draws a green rectangle around samples whose distance is
    below `distThreshold`.
    """
    templateVec = np.array(finalFD(tpFD))
    font = cv2.FONT_HERSHEY_SIMPLEX

    # spFDs is in the same order as sampleContours
    for index, spFD in enumerate(spFDs):
        # Euclidean distance between template and candidate descriptors
        distance = np.linalg.norm(np.array(finalFD(spFD)) - templateVec)
        x, y, w, h = cv2.boundingRect(sampleContours[index])
        # Annotate the distance just above the bounding box
        cv2.putText(imgOri, str(round(distance, 2)), (x, y - 8), font, 0.5,
                    (0, 0, 0), 1, cv2.LINE_AA)
        # A small enough distance counts as a good match
        if distance < distThreshold:
            cv2.rectangle(imgOri, (x - 5, y - 5), (x + w + 5, y + h + 5),
                          (40, 255, 0), 2)


# --------------------------------------------------------------------------
# Main loop: load the image, let the user pick a template region (or use the
# default), then match every contour in the image against the template's
# Fourier descriptor and annotate the result.
# NOTE(review): hard-coded Windows path — adjust for your environment.
imgOri = cv2.imread(r"E:\new/a2.jpg", 1)
if imgOri is None:
    # Fail fast with a clear message instead of crashing later on .copy()
    raise FileNotFoundError(r"Could not read input image: E:\new/a2.jpg")
# imgOricpy is for processing, imgOri is for drawing/showing
imgOricpy = imgOri.copy()
cv2.namedWindow("Original Image")

if manually:
    # Manually select the template by mouse; press Y to confirm
    cv2.setMouseCallback("Original Image", selectTemplete)
else:
    # Default template region
    rect = (50, 100, 130, 160)
    cv2.rectangle(imgOri, (50, 100), (180, 260), (255, 0, 0), 2)
    temReadyFlag = True
    temConfirmFlag = True

while True:

    cv2.imshow("Original Image", imgOri)

    # Run the match exactly once, after the template is selected AND confirmed
    if temReadyFlag and temConfirmFlag and not matchOverFlag:
        # Complex contour vectors of template and samples
        getTempleteCV()
        getSampleCV()
        # Fourier descriptors
        tpFD = getempleteFD()
        sampleFDs = getsampleFDs()
        # Compare and annotate imgOri
        match(tpFD, sampleFDs)

        matchOverFlag = True
        cv2.imwrite("result.jpg", imgOri)
        # Down-scaled copy for display
        imgShow = cv2.resize(imgOri, None, fx=0.66, fy=0.66, interpolation=cv2.INTER_CUBIC)
        cv2.imshow("Small Size Show", imgShow)

    key = cv2.waitKey(1) & 0xFF
    if key in (ord('y'), ord('Y')):
        # Press Y to confirm the template once mouse selection is done
        temConfirmFlag = True
    elif key in (ord('q'), ord('Q')):
        # Press Q to quit
        break

cv2.destroyAllWindows()

 

  • 0
    点赞
  • 13
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 1
    评论
2D激光SLAM with闭合形状特征:傅里叶系数参数化是一种在2D激光SLAM中使用的闭合形状特征提取方法。傅里叶系数参数化的思想是利用傅里叶级数来描述闭合形状特征的几何形状。 在2D激光SLAM的过程中,激光传感器会扫描周围环境并生成点云数据。对于闭合形状特征,我们可以使用傅里叶系数进行参数化表示。傅里叶系数表示了一个函数在频域上的分解,通过不同的频率分量和相位信息来描述函数的形状。 对于闭合形状特征,比如一个圆或者一个多边形,我们可以通过计算其形状的傅里叶系数来提取特征。首先,我们需要将点云数据进行处理,将其转化为边界点的集合。然后,可以使用离散傅里叶变换(DFT)来计算边界点的傅里叶系数。计算完成后,可以选择保留一部分重要的系数,然后将其转换为特征向量。 利用傅里叶系数参数化的闭合形状特征可以用来定位机器人在环境中的位置。在SLAM的过程中,机器人可以通过激光传感器获取周围环境的点云数据,并提取出闭合形状特征。然后,可以利用特征匹配的方法将当前环境特征与先前保存的特征进行匹配,从而确定机器人在环境中的位置。 总之,2D激光SLAM中的闭合形状特征提取方法傅里叶系数参数化可以帮助机器人在环境中定位自身位置。通过计算并提取闭合形状的傅里叶系数,可以获得环境特征的重要信息,从而实现SLAM的应用。
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

AI算法网奇

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值