机器视觉实用工具集NO.10——使用深度学习模型(yolo3)实现物体检测工具

前言

OpenCV 3.3版本之后提供了对主流深度学习主干网络框架的加载支持。opencv给我们提供了一个应用成熟深度学习模型的便捷工具。
YOLO是一个优秀的物体及人体检测深度神经网络模型,可以通过opencv快速部署检测应用。

opencv+YOLO3实现物体检测

YOLO3里面训练好的能够检测的物体有80种,涵盖了人、鸟、车、沙发等日常动物、物品。且相对来说,实时性比较高。opencv+yolo3部署一个物体检测应用比较间接,100多行代码就可以实现。
在这里插入图片描述

python源码

# -*- coding: utf-8 -*-
"""
Created on Wed Sep 15 22:50:59 2021
@author: JAMES FEI
Copyright (C) 2021 FEI PANFENG, All rights reserved.
THIS SOFTEWARE, INCLUDING DOCUMENTATION,IS PROTECTED BY COPYRIGHT CONTROLLED 
BY FEI PANFENG ALL RIGHTS ARE RESERVED.
"""
import numpy as np
import cv2
import base64


def file2base64(img):        
     base64_encode = base64.b64encode(img).decode('utf-8')
     return base64_encode

# base64 to numpy array with opencv
def bas642mat_cv(base64_encode):
     base64_decode = base64.b64decode(base64_encode)
     img_array = np.frombuffer(base64_decode, np.uint8)
     img = cv2.imdecode(img_array, cv2.COLOR_BGR2RGB)
     return img   
# 画图

class Img_Detor:

    net=None
    output_layer_names=None
    lables=["person","bicycle","car","motorcycle","airplane","bus","train","truck","boat","traffic light","fire hydrant","stop sign",
            "parking meter","bench","bird","cat","dog","horse","sheep","cow","elephant","bear","zebra","giraffe","backpack","umbrella",
            "handbag","tie","suitcase","frisbee","skis","snowboard","sports ball","kite","baseball bat","baseball glove","skateboard","surfboard",
            "tennis racket","bottle","wine glass","cup","fork","knife","spoon","bowl","banana","apple","sandwich","orange","broccoli",
            "carrot","hot dog","pizza","donut","cake","chair","couch","potted plant","bed","dining table","toilet","tv","laptop","mouse",
            "remote","keyboard","cell phone","microwave","oven","toaster","sink","refrigerator","book","clock","vase","scissors",
            "teddy bear","hair drier","toothbrush"]
    
    

    jobs=[]
     
    def __init__(self, **kwargs):
        self.mathods=dir(self)        

        self.net, self.output_layer_names = self.load_network("yolov3.cfg", "yolov3.weights")
        print("初始化YOLO")

    

    def load_network(self,config_path, weights_path):
        net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
        output_layer_names = net.getLayerNames()
        output_layer_names = [output_layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
        return net, output_layer_names
    

    def YOLO(self,strimg,confident=0.5,overlap=0.5):
    #cap=cv2.VideoCapture(0)
    #sucess,img=cap.read()   
    #img = cv2.imread("888.png")   
        img = bas642mat_cv(strimg)#专成矩阵
        blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416), swapRB=True, crop=False)
        self.net.setInput(blob)
        layer_outputs = self.net.forward(self.output_layer_names)  
        boxes, confidences, class_IDs = [], [], []
        H, W = img.shape[:2]
        for output in layer_outputs:
            for detection in output:
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]
                if confidence > 0.5:
                    box = detection[0:4] * np.array([W, H, W, H])
                    centerX, centerY, width, height = box.astype("int")
                    x, y = int(centerX - (width / 2)), int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    class_IDs.append(classID)
        indices = cv2.dnn.NMSBoxes(boxes, confidences,confident,overlap)
        BOXS=[]
        if len(indices) > 0:
            # loop over the indexes we are keeping
            aa=indices.flatten()
            for i in aa:
                # extract the bounding box coordinates
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                BOXS.append([(x,y),(w,h),self.lables[class_IDs[i]],confidences[i]])
        # ensure at least one detection exists
        return BOXS

if __name__ == '__main__':
    
    a=Img_Detor()
    #img=cv2.imread("888.png")    
    #d=a.YOLO(img)
    encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),15]
    cap=cv2.VideoCapture(0)  
    while True:
        sucess,img=cap.read()
        result, imgencode = cv2.imencode('.jpg', img, encode_param)         
        strimg=file2base64(imgencode)
        box=a.YOLO(strimg)
        if type(box)==type([]):    
            for b in box:        
                (x,y)=b[0]
                (w,h)=b[1]
                text = "{}: {:.4f}".format(b[2], b[3])
                cv2.putText(img, text, (x, y - 5), cv2.FONT_ITALIC, 0.5, [0, 255, 0], 2)
                cv2.rectangle(img, (x, y), (x + w, y + h), (255,255,0), 2)
            cv2.imshow("mini_eye",img)
        
        
        k=cv2.waitKey(1)
        if k == 27:
            #通过esc键退出摄像
            cv2.destroyAllWindows()
            break

其中,用到的"yolov3.cfg"模型和 "yolov3.weights"权重,可以在以下连接资源下载:
https://download.csdn.net/download/kanbide/83975622

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

JAMES费

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值