Deploying YOLO-Fastest on a Raspberry Pi with OpenCV DNN

I. Code Source

https://github.com/hpc203/Yolo-Fastest-opencv-dnn

II. Environment

1. Windows 10
2. OpenCV 4.5.1 with the dnn module (a quick version check is sketched below)
3. Raspberry Pi 4B
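
Before going further, it is worth confirming the OpenCV build on both machines. A minimal sketch:

import cv2 as cv

# The Darknet importer lives in the dnn module and needs OpenCV 4.x
# (this post uses 4.5.1 on both Windows and the Raspberry Pi)
print(cv.__version__)
assert int(cv.__version__.split('.')[0]) >= 4, 'OpenCV 4+ is required'
assert hasattr(cv, 'dnn'), 'this OpenCV build is missing the dnn module'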

III. PC Side

1. Configuration file paths
Put the weight file and the configuration file into the specified folder, and add your own .names file in the project root directory.
(screenshot: folder layout for the weight/config files)
Modify the code:

modelConfiguration = "Yolo-Fastest-voc/yolo-fastest-1.1.cfg"
modelWeights = "Yolo-Fastest-voc/yolo-fastest-1_last.weights"
# Load names of classes
classesFile = "QR.names"

Just change these paths to your own files.
To test on a single image, the original code can be used as-is. The result looks like this:
(screenshot: detection result with the inference time overlaid)
The time printed below is the total program run time, while the time drawn on the image is the inference time. The inference itself is very fast; the problem is that the overall per-frame time ends up noticeably higher.
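
To see where the extra wall-clock time goes, the detection loop can be timed stage by stage. This is a minimal sketch reusing the net, frame, inpWidth/inpHeight, getOutputsNames and postprocess names from the code in this post; the split it prints is illustrative, not a measurement from the original run.

import time

t0 = time.time()
blob = cv.dnn.blobFromImage(frame, 1/255.0, (inpWidth, inpHeight), [0, 0, 0], swapRB=False, crop=False)
net.setInput(blob)
t1 = time.time()
outs = net.forward(getOutputsNames(net))   # inference only
t2 = time.time()
postprocess(frame, outs)                   # NMS + drawing
t3 = time.time()
print('pre: %.1f ms  forward: %.1f ms  post: %.1f ms'
      % ((t1 - t0) * 1000, (t2 - t1) * 1000, (t3 - t2) * 1000))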

2. Switching to camera input
To test with a camera, I modified the original code as follows:

import cv2 as cv
import argparse
import numpy as np
import time

# Initialize the parameters
confThreshold = 0.25  # Confidence threshold
nmsThreshold = 0.4  # Non-maximum suppression threshold
inpWidth = 320  # Width of network's input image
inpHeight = 320  # Height of network's input image

# Give the configuration and weight files for the model and load the network using them.
modelConfiguration = "Yolo-Fastest-voc/yolo-fastest-1.1.cfg"
modelWeights = "Yolo-Fastest-voc/yolo-fastest-1_last.weights"
# Load names of classes
classesFile = "QR.names"
classes = None
with open(classesFile, 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')
colors = [np.random.randint(0, 255, size=3).tolist() for _ in range(len(classes))]

# Get the names of the output layers
def getOutputsNames(net):
    # Get the names of all the layers in the network
    layersNames = net.getLayerNames()
    # print(dir(net))
    # Get the names of the output layers, i.e. the layers with unconnected outputs
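    # Note: indexing with i[0] assumes OpenCV 4.5.x, where getUnconnectedOutLayers()
    # returns an Nx1 array; newer OpenCV builds return a flat array, in which case
    # plain i is used here (and in the NMS loop of postprocess below)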
    return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# Draw the predicted bounding box
def drawPred(classId, conf, left, top, right, bottom):
    # Draw a bounding box.
    cv.rectangle(frame, (left, top), (right, bottom), (0,0,255), thickness=4)

    label = '%.2f' % conf

    # Get the label for the class name and its confidence
    if classes:
        assert (classId < len(classes))
        label = '%s:%s' % (classes[classId], label)

    # Display the label at the top of the bounding box
    labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    top = max(top, labelSize[1])
    # cv.rectangle(frame, (left, top - round(1.5 * labelSize[1])), (left + round(1.5 * labelSize[0]), top + baseLine), (255,255,255), cv.FILLED)
    cv.putText(frame, label, (left, top-10), cv.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), thickness=2)
    return frame
# Remove the bounding boxes with low confidence using non-maxima suppression
def postprocess(frame, outs):
    frameHeight = frame.shape[0]
    frameWidth = frame.shape[1]

    # Scan through all the bounding boxes output from the network and keep only the
    # ones with high confidence scores. Assign the box's class label as the class with the highest score.
    classIds = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                center_x = int(detection[0] * frameWidth)
                center_y = int(detection[1] * frameHeight)
                width = int(detection[2] * frameWidth)
                height = int(detection[3] * frameHeight)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                classIds.append(classId)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])

    # Perform non maximum suppression to eliminate redundant overlapping boxes with
    # lower confidences.
    indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
    for i in indices:
        i = i[0]
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        drawPred(classIds[i], confidences[i], left, top, left + width, top + height)
    return frame

if __name__=='__main__':
    parser = argparse.ArgumentParser(description='Object Detection using YOLO in OPENCV')
    #parser.add_argument('--image', type=str, default='QR-00064.jpg', help='Path to image file.')
    args = parser.parse_args()

    net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

    vid = cv.VideoCapture(0)

    while True:
        t0 = time.time()
        _, frame = vid.read()
        # Create a 4D blob from a frame.
        blob = cv.dnn.blobFromImage(frame, 1/255.0, (inpWidth, inpHeight), [0, 0, 0], swapRB=False, crop=False)
        # Sets the input to the network
        net.setInput(blob)
        # Runs the forward pass to get output of the output layers
        outs = net.forward(getOutputsNames(net))
 
        
        srcimg = postprocess(frame, outs)
        cv.imshow("result", srcimg)
        print(f'Done. ({time.time() - t0:.5f}s)')

        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    
    vid.release()
    cv.destroyAllWindows()
    '''
    # Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
    t, _ = net.getPerfProfile()
    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
    
    print(f'Done. ({time.time() - t0:.5f}s)')

    winName = 'Deep learning object detection in OpenCV'
    cv.namedWindow(winName,0)
    cv.imshow(winName, frame)
    cv.waitKey(0)
    cv.destroyAllWindows()
    '''

On the PC, using only the CPU, each frame takes roughly 30 ms end to end, which does not feel particularly fast either.

IV. Raspberry Pi Test

1. OpenCV with the dnn module must be compiled on the Raspberry Pi in advance, and the version has to be 4 or above.
2. The speed comes out at about 180 ms per frame, so the optimization continues (see the sketch after this list).
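
Two low-effort knobs worth trying next, given as a sketch rather than a measured result: let OpenCV's CPU backend use all four cores of the Pi 4, and shrink the network input (the Darknet model is fully convolutional, so a smaller multiple-of-32 resolution should be accepted, at some cost in accuracy).

# Let OpenCV's parallel loops (including the dnn CPU backend) use all four Cortex-A72 cores
cv.setNumThreads(4)

# A smaller input roughly scales the forward-pass cost with the pixel count:
# 320x320 -> 256x256 is about a 36% reduction in input area
inpWidth = 256
inpHeight = 256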
