【视觉】树莓派opencv常用操作

【视觉】树莓派opencv常用操作

[^By LCB]: By lcb

零、Import

import cv2
import numpy as np

一 、摄像头初始化

# Open the camera (device index 1)
cap = cv2.VideoCapture(1)
# Set resolution to VGA (640*480)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# NOTE(review): `frame` is used before any cap.read() appears in this snippet —
# assumes a frame was grabbed earlier; confirm in the full script.
frame = cv2.flip(frame, 0)  # flip vertically (around the x axis)
frame = cv2.flip(frame,1)  # flip horizontally (around the y axis)

二、创建掩码

图像颜色空间转换
# Color-space conversion.
# fixed: the file imports `cv2` (see the Import section), not `cv`,
# so the `cv.` alias here was a NameError.
# To HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# To grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
选定HSV阈值

# HSV range for the red laser spot
# NOTE(review): OpenCV hue runs 0-179; an upper H of 187 exceeds that range —
# confirm the intended wrap-around handling.
lower_red_laser = np.array([148, 0, 0])
upper_red_laser = np.array([187, 255, 255])

# HSV range for the green laser spot
lower_green_laser = np.array([46, 0, 243])
upper_green_laser = np.array([87, 255, 255])

# Read the current frame
ret, frame = cap.read()
# Convert the color space to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

# Pixels inside the range become white (255), everything else black (0)
mask_red_laser = cv2.inRange(hsv, lower_red_laser, upper_red_laser)

mask_red_laser 为 cv2.inRange 返回的二值图(落在阈值范围内的部分为白色,其余为黑色)

静态目标去除

作用于二值图

# MOG2 background subtractor: static (non-moving) regions fade out of the
# foreground mask over successive frames. Here it is applied to a binary mask.
fgbg = cv2.createBackgroundSubtractorMOG2()
fgmask = fgbg.apply(mask1)

三、形态学操作

形态转换 - 【布客】OpenCV 4.0.0 中文翻译 (apachecn.org)


# Commonly used structuring elements (kernels)
kernel1 = np.ones((5, 5), dtype=np.uint8)
kernel2 = np.ones((3, 3), dtype=np.uint8)

形态学操作的对象一般为二值图

# Opening and closing
# Opening = erosion followed by dilation: removes small noise
# Closing = dilation followed by erosion: fills small holes in the foreground
mask_green = cv2.morphologyEx(mask_green, cv2.MORPH_OPEN, kernel1, iterations=2)
mask_green = cv2.morphologyEx(mask_green, cv2.MORPH_CLOSE, kernel1, iterations=2)

# Gradient = dilation - erosion (looks roughly like an outline)
# fixed: cv2.morphotogyEx -> cv2.morphologyEx (typo; no such function exists)
gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
# Erosion
erosion = cv2.erode(img, kernel, iterations=1)
# Dilation
# fixed: cv2.dilation -> cv2.dilate (the OpenCV function is named dilate)
dilation = cv2.dilate(img, kernel, iterations=1)
# Top hat   = source - opening
tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
# Black hat = closing - source
blackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)

四、轮廓的属性与操作

轮廓的属性长宽比、固实性、圆度等多种属性能在实际应用中锁定特定的目标,排除干扰,详细文档有如下参考

OpenCV中的轮廓 - 【布客】OpenCV 4.0.0 中文翻译 (apachecn.org)

(1)轮廓查找
# OpenCV 4: findContours returns (contours, hierarchy).
# fixed: `cv` -> `cv2` to match this file's import.
contours, hierarchy = cv2.findContours(source_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

在cv.findContours函数中有三个参数,第一个是源图像,第二个是轮廓检索模式,第三个是轮廓逼近方法。并输出轮廓和层次。轮廓是图像中所有轮廓的Python列表。每个单独的轮廓都是对象边界点的(x,y)坐标的Numpy数组

CV_RETR_EXTERNAL只检测最外围轮廓,包含在外围轮廓内的内围轮廓被忽略

CV_RETR_LIST 检测所有的轮廓

CV_RETR_TREE, 检测所有轮廓,所有轮廓建立一个等级树结构。外层轮廓包含内层轮廓,内 层轮廓还可以继续包含内嵌轮廓。

返回轮廓的list,list中每个对象都代表一个Numpy数组形式的轮廓

(2)轮廓绘制
# Draw a single contour (wrapped in a one-element list, index 0) in green.
# fixed: `cv` -> `cv2` to match this file's import.
cv2.drawContours(img, [cnt], 0, (0,255,0), 3)
(3)轮廓确定其中心点

可以查找轮廓的多个属性,这里用轮廓的四个极点求中心点

【Example】激光点的找寻
# Example: locating the red laser dot from its binary mask
red_laser_contours, _ = cv2.findContours(mask_red_laser, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# print("red_laser_contours",red_laser_contours)
if red_laser_contours != ():
    # The largest contour is assumed to be the laser dot
    red_laser_contour = max(red_laser_contours, key=cv2.contourArea)
    extLeft = tuple(red_laser_contour[red_laser_contour[:, :, 0].argmin()][0])[0]
    # x coordinate of the leftmost point (index [0])
    extRight = tuple(red_laser_contour[red_laser_contour[:, :, 0].argmax()][0])[0]
    extTop = tuple(red_laser_contour[red_laser_contour[:, :, 1].argmin()][0])[1]
    # y coordinate of the topmost point (index [1])
    extBot = tuple(red_laser_contour[red_laser_contour[:, :, 1].argmax()][0])[1]

    # Midpoint of the four extreme points approximates the dot's center
    red_laser_x = int((extLeft + extRight) / 2)
    red_laser_y = int((extTop + extBot) / 2)

    cv2.circle(board_img, (red_laser_x, red_laser_y), 1, (0, 255, 0), 4)
(4)轮廓近似为多边形
	epsilon_exter = 0.02 * cv2.arcLength(external_black_contour, True)
    epsilon_inner = 0.02 * cv2.arcLength(inner_black_contour, True)

    approx_exter = cv2.approxPolyDP(external_black_contour, epsilon_exter, True)
    approx_inner = cv2.approxPolyDP(inner_black_contour, epsilon_inner, True)
    #approxPolyDP 返回每个角点的信息
# Example: detecting a double (nested) triangle
black_contours, hierarchys = cv2.findContours(mask_black, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
# fixed: the old `hierarchys != ()` check passes even when hierarchy is None
# (no contours), then crashes at hierarchys[0].
if len(black_contours) > 0 and hierarchys is not None:
    # Find an inner contour: hierarchy[3] is the parent index, != -1 means nested.
    # fixed: initialise to None so the check below cannot hit an unbound name
    # when no inner contour exists.
    inner_black_contour = None
    for contour, hierarchy in zip(black_contours, hierarchys[0]):
        if hierarchy[3] != -1:  # it is an inner contour
            inner_black_contour = contour
            break
    # The outer contour is simply the largest one.
    # (fixed: this snippet mixed tab and space indentation)
    external_black_contour = max(black_contours, key=cv2.contourArea)
    if external_black_contour is not None and inner_black_contour is not None:
        # Polygon approximation, epsilon = 2% of each contour's perimeter
        epsilon_exter = 0.02 * cv2.arcLength(external_black_contour, True)
        epsilon_inner = 0.02 * cv2.arcLength(inner_black_contour, True)

        approx_exter = cv2.approxPolyDP(external_black_contour, epsilon_exter, True)
        approx_inner = cv2.approxPolyDP(inner_black_contour, epsilon_inner, True)
        if len(approx_exter) == 3 and len(approx_inner) == 3:
            pass  # both the outer and the inner contour are triangles: target found
(5)轮廓裁剪

外接矩形

    x, y, w, h = cv2.boundingRect(cnt[0])
【Example】以轮廓外接矩形为边界裁剪并缩放
    cnt = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)[0]
    x, y, w, h = cv2.boundingRect(cnt[0])
    #裁剪(以轮廓外接矩形为边界)并缩放
    digit_roi = cv2.resize(img_temp[y:y + h, x:x + w], (57, 88))

最小矩形

rect = cv2.minAreaRect(contour)
【Example】以轮廓最小矩形为边界透视矫正并缩放

该算法中四角点排序的矫正部分系 GPT 所写,局限性在于:当矩形倾斜角度大于 45 度时,矫正后的图像会左旋 90 度或右旋 90 度,而非正立。

rect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rect)
srcArr = np.float32(box)
cv2.imshow('mask', mask)
cv2.imshow('image', image)
print("len(approx)",len(approx))
# Keep grabbing frames until the approximated contour is a quadrilateral and y > 30.
# NOTE(review): `approx` and `y` must already be bound by earlier code before
# this loop condition first runs — confirm against the full script this
# fragment was excerpted from.
while len(approx)!= 4 or (y < 30) :
    # print("target lost")
    ret, image = cap.read()
    cv2.imshow('image', image)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_white, upper_white)
    # Find contours
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contour = max(contours, key=cv2.contourArea)
    cv2.drawContours(image, [contour], 0, (0, 255, 0), 3)
    # Polygon approximation of the contour
    epsilon = 0.02 * cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, epsilon, True)
    x, y, w, h = cv2.boundingRect(contour)
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    srcArr = np.float32(box)
    cv2.imshow('mask', mask)
    cv2.imshow('image', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        print(digit_out[0])
        break

# Order the four corners clockwise: top-left, top-right, bottom-right, bottom-left
approx = approx.reshape(4, 2)
rect = np.zeros((4, 2), dtype=np.float32)

# Smallest coordinate sum (x + y) -> top-left corner
s = np.sum(approx, axis=1)
rect[0] = approx[np.argmin(s)]

# np.diff gives (y - x): smallest difference -> top-right corner
diff = np.diff(approx, axis=1)
rect[1] = approx[np.argmin(diff)]

# fixed: the largest (y - x) is the bottom-LEFT corner, so it belongs in
# slot 3; the old code put it in slot 2, yielding the order tl, tr, bl, br
# (not clockwise) and skewing the perspective transform.
rect[3] = approx[np.argmax(diff)]

# Largest coordinate sum -> bottom-right corner
rect[2] = approx[np.argmax(s)]

print("rect =", rect)
srcArr = np.float32(rect)

# fixed: np.int0 was removed in NumPy 2.0; np.intp is its canonical name
docCnt = np.intp(box)

result_img = four_point_transform(image, srcArr.reshape(4, 2))  # four-point perspective transform of the original image
result_img = cv2.resize(result_img, (57, 88), interpolation=cv2.INTER_CUBIC)

五、图像几何变换

几何变换 · OpenCV-Python初学自码 · 看云 (kancloud.cn)

上链接为opencv三个变换的函数,之前找四个绿色色块用的另一个变换的方法four_point_transform

(1)四点透视变换

Python opencv 图像矫正——透视变换_python opencv图像校正

from imutils.perspective import four_point_transform
import imutils
import cv2

# srcArr 为顺时针四个角点
ret, image = cap.read()
# board_img:白板图片
board_img = four_point_transform(image, srcArr)  # 对原始图像进行四点透视变换
【Example】通过四个绿色色块锁定白板
while (getCornor == False):
    ret, frame = cap.read()
    # 转换色彩空间为HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # 创建掩码 绿色的掩码
    mask_green = cv2.inRange(hsv, lower_green, upper_green)

    # Morphological clean-up of the green mask
    kernel1 = np.ones((5, 5), dtype=np.uint8)
    kernel2 = np.ones((5, 5), dtype=np.uint8)
    kernel3 = np.ones((3, 3), dtype=np.uint8)

    # fixed: the third positional argument of erode/dilate is `dst`, not
    # `iterations` — the iteration count must be passed as a keyword.
    mask_green = cv2.erode(mask_green, kernel1, iterations=1)
    mask_green = cv2.morphologyEx(mask_green, cv2.MORPH_OPEN, kernel1, iterations=2)
    mask_green = cv2.dilate(mask_green, kernel2, iterations=3)

    # 绿色为四角 通过四角锁定图片
    contours, _ = cv2.findContours(mask_green, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    Xarrow = [0] * 4
    Yarrow = [0] * 4
    i = 0
    findLeftHigh = [0] * 4 
    #findLeftHigh数组 实际上存储了四个角的Point坐标
    findRightHigh = 0
    findRightHighIndex = 0
    findLeftLow = 0
    findLeftLowIndex = 0
    TARGET_POINTS = [None] * 4
    print("len(contours)", len(contours), "i", i)
    if (len(contours) == 4 and i <= 4):
        #存储4个绿色矩形的中点
        #x+y到findLeftHigh[]
        #x到Xarrow[] 
        #y到Yarrow[] 
        for contour in contours:
            cv2.drawContours(frame, [contour], 0, (0, 255, 0), 3)
            extLeft = tuple(contour[contour[:, :, 0].argmin()][0])[0]
            extRight = tuple(contour[contour[:, :, 0].argmax()][0])[0]
            extTop = tuple(contour[contour[:, :, 1].argmin()][0])[1]
            extBot = tuple(contour[contour[:, :, 1].argmax()][0])[1]

            Xarrow[i] = int((extLeft + extRight) / 2)
            Yarrow[i] = int((extTop + extBot) / 2)
            findLeftHigh[i] = Xarrow[i] + Yarrow[i]
            i = i + 1
        #找到最左上角的Point
        leftHigh = (
Xarrow[findLeftHigh.index(min(findLeftHigh))], Yarrow[findLeftHigh.index(min(findLeftHigh))])
        #找到最右下角的Point
        rightLow = (
        Xarrow[findLeftHigh.index(max(findLeftHigh))], Yarrow[findLeftHigh.index(max(findLeftHigh))])
        i = 0
        #找到 另两点
        for i in range(0, 4):
            if i == findLeftHigh.index(min(findLeftHigh)) or i == findLeftHigh.index(max(findLeftHigh)):
                continue
            if (findRightHigh < Xarrow[i] - Xarrow[findLeftHigh.index(min(findLeftHigh))]):
                findRightHigh = Xarrow[i] - Xarrow[findLeftHigh.index(min(findLeftHigh))]
                findRightHighIndex = i
            if (findLeftLow < Yarrow[i] - Yarrow[findLeftHigh.index(min(findLeftHigh))]):
                findLeftLow = Yarrow[i] - Yarrow[findLeftHigh.index(min(findLeftHigh))]
                findLeftLowIndex = i
        #找到最右上角的Point
        rightHigh = (Xarrow[findRightHighIndex], Yarrow[findRightHighIndex])
        #找到最左下角的Point
        leftLow = (Xarrow[findLeftLowIndex], Yarrow[findLeftLowIndex])
        TARGET_POINTS = [leftHigh, rightHigh, rightLow, leftLow]
    print("TARGET_POINTS", TARGET_POINTS)

    cv2.imshow('frame', frame)
    cv2.imshow('mask_green', mask_green)
	
    #再次排序
    if len(contours) == 4:
        cv2.destroyWindow('frame')
        cv2.destroyWindow('mask_green')
        # 将角点按顺时针排序
        approx = np.array(TARGET_POINTS)
        approx = approx.reshape(4, 2)
        rect = np.zeros((4, 2), dtype=np.float32)
        # 计算角点的总和,最小值为左上角点
        s = np.sum(approx, axis=1)
        rect[0] = approx[np.argmin(s)]
        # 计算角点的差值,最大值为右下角点
        diff = np.diff(approx, axis=1)
        rect[2] = approx[np.argmax(diff)]
        # 计算角点的差值,最小值为右上角点
        rect[1] = approx[np.argmin(diff)]
        # 计算角点的总和,最大值为左下角点
        rect[3] = approx[np.argmax(s)]
        srcArr = np.float32(rect)

        srcArr = srcArr.reshape(4, 2)
        getCornor = True
        break
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.destroyWindow('frame')
        cv2.destroyWindow('mask_green')
        break
(2)极坐标与直角坐标变换

可以用来在圆形坐标系进行操作

import math
from math import sqrt
from math import sin, cos

def length(v):
    """Return the Euclidean length of the 2-D vector *v*."""
    vx, vy = v[0], v[1]
    return sqrt(vx ** 2 + vy ** 2)

def to_polar(vector):
    """Convert Cartesian coordinates to polar (length, angle in radians)."""
    angle = math.atan2(vector[1], vector[0])
    return (length(vector), angle)

def to_cartesian(polar_vector):
    """Convert a (length, angle-in-radians) pair back to Cartesian coordinates."""
    r, theta = polar_vector[0], polar_vector[1]
    return (r * cos(theta), r * sin(theta))

六、OpenCV绘图

#线段
cv2.line(img,Point1,Point2, (0, 255, 255), thickness=2)
#圆
cv2.circle(board_img, centerPoint, 1, (0, 255, 0), 4)
#矩形
cv.rectangle(img,LeftHighPoint,RightLowPoint,(0,255,0),3)

#绘制轮廓
img2, contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# 第三个参数传递值为-1,绘制所有轮廓
cv.drawContours(img, contours, -1, (0, 255, 0), 3)

cnt = contours[3]
#绘制一个轮廓
cv.drawContours(img, [cnt], 0, (0,255,0), 3)
#文本
cv2.putText(frame, "文字", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

七、模板匹配

(1)加载模板图片
def load_digits():
    """Load the digit template images and append preprocessed ROIs to `digits`.

    Each file in the template directory is read, converted to an inverted
    Otsu binary image, cropped to the bounding rectangle of its first
    contour, and resized to 57x88 so all templates share one size.

    NOTE(review): relies on module-level globals `img_address` (template
    directory) and `digits` (output list) — confirm they are defined before
    calling.
    """
    path = img_address
    filename = os.listdir(path)
    for file in filename:
        # fixed: build the path with os.path.join (and actually use `path`)
        # instead of hard-coding a Windows "\\" separator, so the loader
        # also works on the Raspberry Pi (Linux).
        img = cv2.imread(os.path.join(path, file))
        # To grayscale
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Grayscale to binary (inverted + Otsu threshold)
        img_temp = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        cnt = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)[0]
        x, y, w, h = cv2.boundingRect(cnt[0])
        # Crop to the contour's bounding rectangle and resize
        digit_roi = cv2.resize(img_temp[y:y + h, x:x + w], (57, 88))
        digits.append(digit_roi)
    print("successfully load digits")

模板达到的效果:二值,尺寸大小一致,好遍历

(2)搜寻到待匹配目标图片

参考【Example】以轮廓最小矩形为边界透视矫正并缩放

待匹配目标需要达到的效果:二值,尺寸与模板相同

(3)模板匹配
source = []
digit_out = []

# Match the candidate image against every template; keep the best score each.
for digitROI in digits:
    res = cv2.matchTemplate(result_black, digitROI, cv2.TM_CCOEFF_NORMED)  
    # Template matching with the normalized correlation coefficient (NCC)
    max_val = cv2.minMaxLoc(res)[1]  # best match score for this template
    print("max_val=: ", max_val)  # the closer to 1, the better the match
    source.append(max_val)
# Index of the best-matching template, stored as a string.
digit_out.append(str(source.index(max(source))))
# NOTE(review): the "no match" sentinel is the int 666 while normal entries
# are strings — confirm downstream code handles both types.
if(max(source)<0.5):
    digit_out[0] = 666

八、机器学习

K-最近邻算法 - 【布客】OpenCV 4.0.0 中文翻译 (apachecn.org)

【Example】使用 kNN 进行手写识别 与 目标追踪

注:

① 下面 代码的 train_image, train_label = load_mnist(“train-images.idx3-ubyte”, “train-labels.idx1-ubyte”)需要在同目录下保存这两个模型文件才能使用

② 文件存储路径为绝对路径,需要修改

另外:目标追踪算法例程也在此处

import cv2
import sys
import matplotlib.pyplot as plt
import struct,os
import numpy as np
from array import array as pyarray
from numpy import append, array, int8, uint8, zeros
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score,classification_report

def load_mnist(image_file, label_file, path="."):
    """Load an MNIST idx image/label file pair.

    Parameters: file names of the big-endian idx3 image file and idx1 label
    file, plus the directory they live in.
    Returns (images, labels): images is a uint8 array of shape
    (N, rows*cols) with one flattened image per row; labels is an int8
    array of shape (N, 1).
    """
    digits = np.arange(10)  # keep all ten classes

    fname_image = os.path.join(path, image_file)
    fname_label = os.path.join(path, label_file)

    # Label file: ">II" header (magic, count) then one signed byte per label.
    # fixed: use context managers so the files are closed even on error.
    with open(fname_label, 'rb') as flbl:
        magic_nr, size = struct.unpack(">II", flbl.read(8))
        lbl = pyarray("b", flbl.read())

    # Image file: ">IIII" header (magic, count, rows, cols) then raw pixels.
    with open(fname_image, 'rb') as fimg:
        magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
        img = pyarray("B", fimg.read())

    # Keep only samples whose label is one of the wanted digits (all of 0-9 here).
    ind = [ k for k in range(size) if lbl[k] in digits ]
    N = len(ind)

    images = zeros((N, rows*cols), dtype=uint8)
    labels = zeros((N, 1), dtype=int8)
    for i in range(len(ind)):
        images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((1, rows*cols))
        labels[i] = lbl[ind[i]]

    return images, labels


# Demo entry point: track an object with an OpenCV (legacy) tracker, draw the
# tracked path with matplotlib, then classify the rendered path image with a
# kNN model trained on MNIST. Requires the four MNIST idx files in the working
# directory; the D:/Python paths below are Windows-specific and must be adjusted.
if __name__ == '__main__' :
 
    # train image and label set
    train_image, train_label = load_mnist("train-images.idx3-ubyte", "train-labels.idx1-ubyte")
    test_image, test_label = load_mnist("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte")

    # Set up tracker.
    # Instead of MIL, you can also use

    tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'CSRT','MOSSE']
    tracker_type = tracker_types[5]  # index 5 -> 'CSRT'
 
 
    # Instantiate the selected tracker (cv2.legacy API)
    if tracker_type == 'BOOSTING':
        tracker = cv2.legacy.TrackerBoosting_create()
    if tracker_type == 'MIL':
        tracker = cv2.legacy.TrackerMIL_create()
    if tracker_type == 'KCF':
        tracker = cv2.legacy.TrackerKCF_create()
    if tracker_type == 'TLD':
        tracker = cv2.legacy.TrackerTLD_create()
    if tracker_type == 'MEDIANFLOW':
        tracker = cv2.legacy.TrackerMedianFlow_create()
    if tracker_type == "CSRT":
        tracker = cv2.legacy.TrackerCSRT_create()
    if tracker_type == "MOSSE":
        tracker = cv2.legacy.TrackerMOSSE_create()

    # History of the tracked bounding-box position, used to draw the path
    TailsListX=[]
    TailsListY=[]
    TailsNumberCount=0
    point_color = (255, 0, 255)
    # Read video
    video = cv2.VideoCapture(0)
    width = 640  # capture frame width
    height = 480   # capture frame height

    video.set(cv2.CAP_PROP_FRAME_WIDTH, width)  # set width
    video.set(cv2.CAP_PROP_FRAME_HEIGHT, height)  # set height

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()
 
    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    
    # Define an initial bounding box
    bbox = (287, 23, 86, 320)
 
    # Uncomment the line below to select a different bounding box
    bbox = cv2.selectROI(frame, False)
 
    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
    
    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break
        
        # Start timer
        timer = cv2.getTickCount()
 
        # Update tracker
        ok, bbox = tracker.update(frame)
 
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
 
        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            # Target save: y is flipped (480 - y) so the plotted path has its
            # origin at the bottom, like an upright digit
            TailsListX.append(int(bbox[0]))
            TailsListY.append(abs(480-int(bbox[1])))
            TailsNumberCount+=1
            print(p1)
            cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
        else :
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
 
        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2)
    
        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2)
 
 
        # Display result
        cv2.imshow("Tracking", frame)
 
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27 : 
            break
                #cv2.circle(frame, point, 10, point_color, 4)
                #cv2.imshow("Tails", frame)
    # Render the tracked path as a thick black stroke and save it as an image
    plt.plot(TailsListX, TailsListY, color='k',lw=24)
    plt.axis('off')
  
    # NOTE(review): absolute Windows path — adjust for the Raspberry Pi
    plt.savefig('D:/Python/test.png')
    img=cv2.imread("D:/Python/test.png",0)

    # Invert the grayscale image (matplotlib draws dark-on-light;
    # presumably to match the MNIST training data — confirm)
    height_gray, width_gray = img.shape[:2]
    for row in range(height_gray):
        for col in range(width_gray):
            img[row][col] = 255 - img[row][col]

    # Downscale to the 28x28 MNIST input size
    resize_img=cv2.resize(img, (28,28))

    cv2.imwrite('test.png', resize_img)

    # Flatten to a single 784-element feature vector for the classifier
    feature = np.reshape(resize_img,(28*28))

    feature=feature.reshape(1, -1)

    # Fit kNN on the full MNIST training set and classify the traced digit
    knn = KNeighborsClassifier()
    knn.fit(train_image,train_label)
    predict = knn.predict(feature)
    print(predict)
    # print("accuracy_score: %.4lf" % accuracy_score(predict,test_label))

    # cv2.imshow("dst: %d x %d" % (resize_img.shape[0], resize_img.shape[1]), resize_img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()  
    #plt.show()

九、树莓派硬件

树莓派4B全40管脚对应功能示意图

(1) 串口通信

Python 中打包和解包的操作

Python中的struct - 知乎 (zhihu.com)

import struct
import serial

ser = serial.Serial("/dev/ttyAMA0", 115200)

def Sending_Mission_1_Data(x, x_direction, y, y_direction):
    """Transmit one mission-1 position frame over the global serial port.

    Frame layout ("<BBBBB", five unsigned bytes, little-endian):
    0xAA header, x, x_direction, y, y_direction.
    """
    global ser
    frame_bytes = struct.pack("<BBBBB", 0xAA, x, x_direction, y, y_direction)
    ser.write(frame_bytes)
(2) GPIO中断
import RPi.GPIO as GPIO  #引入GPIO模块
#三个引脚 BCM编码
channel1 = 18
channel2 = 23
channel3 = 24
channel = [channel1, channel2, channel3]

# GPIO edge-interrupt callback
def my_callback(touchPin):
    """Decode the 3-bit mission code from the three BCM input pins.

    channel1 is bit 0, channel2 bit 1, channel3 bit 2; the decoded value is
    stored in the global `Mission` when it is a valid code (0-7).
    """
    global Mission, Mission_receive
    Mission_receive = (GPIO.input(channel1)
                       + GPIO.input(channel2) * 2
                       + GPIO.input(channel3) * 4)
    if 0 <= Mission_receive <= 7:
        Mission = Mission_receive
        print("Mission get:", Mission)
        
        
if __name__ == "__main__":
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(channel, GPIO.IN)
    # Read the initial 3-bit mission code once at startup.
    Mission_receive = GPIO.input(channel1) + GPIO.input(channel2) * 2 + GPIO.input(channel3) * 4
    # fixed: the body of this `if` was not indented (IndentationError)
    if Mission_receive >= 0 and Mission_receive <= 7:
        Mission = Mission_receive
        print("Mission get:", Mission)
    print('Init OK')
    # Register listeners: any edge (rising or falling) on the three pins
    # re-runs my_callback.
    GPIO.add_event_detect(channel1, GPIO.BOTH, callback=my_callback)
    GPIO.add_event_detect(channel2, GPIO.BOTH, callback=my_callback)
    GPIO.add_event_detect(channel3, GPIO.BOTH, callback=my_callback)
    print('new event add OK')
(3) 定时器与延时
import time


# Accumulate the elapsed time of each pass; exit after ~8 seconds of work.
GreenTailTimeCount = 0.0
# fixed: the `while` line had stray leading whitespace (IndentationError
# at top level) and its body was over-indented to match.
while GreenTailTimeCount < 8:
    WhileTimeStart = time.time()
    # timing starts here
    # ... the operation to be timed goes here ...
    WhileTimeEnd = time.time()
    GreenTailTimeCount += WhileTimeEnd - WhileTimeStart

附录一 opencv python可查的文档

OpenCV-Python初学自码 · 看云 (kancloud.cn)

【布客】OpenCV 4.0.0 中文翻译 (apachecn.org)

官网英文文档OpenCV: OpenCV modules

注意opencv版本不同的函数名差异

附录二 树莓派引脚图

树莓派4B全40管脚对应功能示意图

附录三 阈值调参工具

(1)阈值选择
import cv2
import numpy as np

def nothing(x):
    """No-op callback required by cv2.createTrackbar; ignores its argument."""
    return None

def mouse_callback(event, x, y, flags, param):
    """On left click, print the clicked position and its HSV pixel value.

    Reads the module-level `frame` (the most recent camera image).
    """
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    hsv_value = hsv_img[y, x]
    print(f"Clicked position: ({x}, {y})")
    print(f"HSV value: {hsv_value}")

# Create the preview window
cv2.namedWindow('Image')

# Trackbars for the lower HSV threshold
cv2.createTrackbar('Low_Hue', 'Image', 0, 255, nothing)
cv2.createTrackbar('Low_Saturation', 'Image', 0, 255, nothing)
cv2.createTrackbar('Low_Value', 'Image', 0, 255, nothing)
# Trackbars for the upper HSV threshold
cv2.createTrackbar('High_Hue', 'Image', 0, 255, nothing)
cv2.createTrackbar('High_Saturation', 'Image', 0, 255, nothing)
cv2.createTrackbar('High_Value', 'Image', 0, 255, nothing)
# fixed: register the mouse callback once, instead of re-registering it on
# every loop iteration as before.
cv2.setMouseCallback('Image', mouse_callback)
# Open the camera
cap = cv2.VideoCapture(0)

while True:
    # Read a camera frame
    ret, frame = cap.read()
    # fixed: skip dead frames instead of crashing in cvtColor on None
    if not ret:
        continue
    # Convert the image to the HSV color space
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Current lower-threshold slider values
    low_hue = cv2.getTrackbarPos('Low_Hue', 'Image')
    low_saturation = cv2.getTrackbarPos('Low_Saturation', 'Image')
    low_value = cv2.getTrackbarPos('Low_Value', 'Image')
    # Current upper-threshold slider values
    high_hue = cv2.getTrackbarPos('High_Hue', 'Image')
    high_saturation = cv2.getTrackbarPos('High_Saturation', 'Image')
    high_value = cv2.getTrackbarPos('High_Value', 'Image')
    # HSV threshold range
    lower_threshold = np.array([low_hue, low_saturation, low_value])
    upper_threshold = np.array([high_hue, high_saturation, high_value])
    # Binarize the image
    thresholded = cv2.inRange(hsv, lower_threshold, upper_threshold)
    # Show the original and the binary image side by side
    cv2.imshow('Image', np.hstack([frame, cv2.cvtColor(thresholded, cv2.COLOR_GRAY2BGR)]))
    # Press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera and close the windows
cap.release()
cv2.destroyAllWindows()
(2)阈值自适应
import cv2

# 全局变量,用于存储鼠标选择的区域坐标和标志位
selected_rect = None
selecting = False

def on_mouse(event, x, y, flags, param):
    """Mouse handler: drag with the left button to select a rectangle.

    Button-down anchors the rectangle at (x, y); button-up completes it as
    (x0, y0, width, height) in the global `selected_rect`. The global
    `selecting` flag is True while a drag is in progress.
    """
    global selected_rect, selecting
    if event == cv2.EVENT_LBUTTONDOWN:
        selecting = True
        selected_rect = (x, y, 0, 0)
    elif event == cv2.EVENT_LBUTTONUP:
        x0, y0 = selected_rect[0], selected_rect[1]
        selected_rect = (x0, y0, x - x0, y - y0)
        selecting = False
def main():
    """Interactive HSV-threshold picker.

    Phase 1: show live video; the user drags a rectangle with the mouse
    (see on_mouse). The mean H/S/V of the selected region defines a
    lower/upper threshold band.
    Phase 2: continuously apply cv2.inRange with that band and show the
    binary mask alongside the live video until 'q' is pressed.
    """
    global selected_rect

    # Open the camera
    cap = cv2.VideoCapture(0)

    cv2.namedWindow("Real-time Video")
    cv2.setMouseCallback("Real-time Video", on_mouse)

    # Loop until a region has been selected successfully
    while True:
        # Read one frame
        ret, frame = cap.read()
        if not ret:
            break
        if selecting and selected_rect is not None:
            # Draw the rectangle currently being dragged onto the frame
            x, y, w, h = selected_rect
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Show the live video
        cv2.imshow("Real-time Video", frame)
        # Once a region has been selected, process it a single time
        if not selecting and selected_rect is not None and selected_rect[2] > 0 and selected_rect[3] > 0:
            hsvImage = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # BGR -> HSV
            x, y, w, h = selected_rect
            imagePart = hsvImage[y:y+h, x:x+w]
            channels = imagePart.shape[-1]
            if channels == 3:
                hValues = imagePart[:, :, 0].mean()  # mean of the H channel
                sValues = imagePart[:, :, 1].mean()  # mean of the S channel
                vValues = imagePart[:, :, 2].mean()  # mean of the V channel

                # `v != v` is the NaN test (mean of an empty selection is NaN)
                if not (hValues != hValues or sValues != sValues or vValues != vValues):
                    print("Mean H value:", hValues)
                    print("Mean S value:", sValues)
                    print("Mean V value:", vValues)

                    # Threshold band around the region's mean HSV.
                    # NOTE(review): bounds can fall below 0 or above 255 for
                    # extreme means — confirm cv2.inRange behaves as intended.
                    lower_bound = (int(hValues - 10), int(sValues - 50), int(vValues - 60))
                    upper_bound = (int(hValues + 10), int(sValues + 50), int(vValues + 30))
                    print("lower_bound",lower_bound,"upper_bound",upper_bound)

                    # Selection done: leave phase 1 and filter with this fixed band
                    break

        # Press 'q' to quit.
        # NOTE(review): quitting here leaves lower_bound/upper_bound unbound,
        # so phase 2 below would raise NameError — confirm intended flow.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Phase 2: keep filtering the full frame with the fixed band
    while True:
        # Read one frame
        ret, frame = cap.read()
        if not ret:
            break
        # Apply the threshold filter
        hsvImage = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # BGR -> HSV
        filteredImage = cv2.inRange(hsvImage, lower_bound, upper_bound)
        # Show the filtered image
        cv2.imshow("Filtered Image", filteredImage)
        # Show the live video
        cv2.imshow("Real-time Video", frame)
        # Press 'q' to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release the camera
    cap.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main()
(int(hValues + 10), int(sValues + 50), int(vValues + 30))
                    print("lower_bound",lower_bound,"upper_bound",upper_bound)

                    # 循环结束,固定使用区域HSV均值对整个原始图像进行滤波
                    break

        # 按下'q'键退出循环
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # 继续处理整个原始图像并进行滤波
    while True:
        # 读取一帧
        ret, frame = cap.read()
        if not ret:
            break
        # 进行滤波操作
        hsvImage = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # 将RGB转成HSV
        filteredImage = cv2.inRange(hsvImage, lower_bound, upper_bound)
        # 显示滤波后的图像
        cv2.imshow("Filtered Image", filteredImage)
        # 显示实时视频
        cv2.imshow("Real-time Video", frame)
        # 按下'q'键退出循环
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # 释放摄像头资源
    cap.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

绿茶冰

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值