Study Notes: Python for yolo, Detecting Objects from the Webcam

Foreword:

Purpose: these are study notes, kept mainly as a record for myself.

(PS: I am a beginner, so treat this article as a reference only; if you spot any mistakes, corrections are very welcome!)

I only recently started working with yolo. Since the official yolo site and almost all material online are based on C, and I find Python more concise, I used Python to reproduce part of what the C code can do.

This post follows the previous one, "yolo detecting local video in Python". Here I cover part (3): using Python to call the webcam and detect objects with yolo.

[The code in this series is based on the original author's program; because of TAB indentation differences, the code in this post may show indentation problems. Please bear with me.]

Notice:

All posts in this series (including the earlier ones) are based on the yolo (darknet) v3 framework. Other versions may not work as-is, but they are similar enough that this can still serve as a reference.

1. Environment and platform

Ubuntu 14.04 + Python 2.7 + OpenCV 2.4 + yolo v3
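To double-check that your interpreter and OpenCV versions match the ones above, a quick sanity check (my own addition, not part of the original setup) is:

    import sys
    import cv2
    # print the Python and OpenCV versions in use
    print sys.version
    print cv2.__version__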

2. Python code for detecting webcam video with yolo

Create a new file, my_webcam_darknet.py, under darknet/python.

Below is my Python code, based on the yolo v3 framework, for running detection on the webcam:

# -*- coding: utf-8 -*-
# modified by leo at 2018.04.26
# function: 1. detect the video captured by the webcam
#           2. optionally skip some frames

from ctypes import *
import math
import random
# cv2 is used to draw the boxes and display the frames
import cv2
# Image comes from PIL (on newer installs this is the Pillow package)
from PIL import Image

# sample an index from a probability distribution (kept from darknet.py, unused here)
def sample(probs):
    s = sum(probs)
    probs = [a/s for a in probs]
    r = random.uniform(0, 1)
    for i in range(len(probs)):
        r = r - probs[i]
        if r <= 0:
            return i
    return len(probs)-1

def c_array(ctype, values):
    arr = (ctype*len(values))()
    arr[:] = values
    return arr

class BOX(Structure):
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]

class DETECTION(Structure):
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]


class IMAGE(Structure):
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]

class METADATA(Structure):
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]

    

# load the darknet shared library; change this path to point at your own build
#lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
lib = CDLL("/home/username/darknet/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int

predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)

set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]

make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)

make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)

free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]

reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]

load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p

do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

free_image = lib.free_image
free_image.argtypes = [IMAGE]

letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE

load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA

load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE

rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]

predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)

def classify(net, meta, im):
    out = predict_image(net, im)
    res = []
    for i in range(meta.classes):
        res.append((meta.names[i], out[i]))
    res = sorted(res, key=lambda x: -x[1])
    return res

def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    # load the image from disk and run the network on it
    im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    # non-maximum suppression removes overlapping boxes
    if nms:
        do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                # (label, confidence, (center_x, center_y, width, height))
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    # sort by descending confidence
    res = sorted(res, key=lambda x: -x[1])
    free_image(im)
    free_detections(dets, num)
    return res

# 2018.04.25
def showPicResult(image):
    # draw the detection results (global r) on the image and display it
    img = cv2.imread(image)
    cv2.imwrite(out_img, img)
    for i in range(len(r)):
        # yolo returns center (x, y) plus width/height; convert to box corners
        x1 = r[i][2][0] - r[i][2][2]/2
        y1 = r[i][2][1] - r[i][2][3]/2
        x2 = r[i][2][0] + r[i][2][2]/2
        y2 = r[i][2][1] + r[i][2][3]/2
        im = cv2.imread(out_img)
        # draw a green rectangle around the object
        cv2.rectangle(im, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 3)
        # put the class label just above the top-left corner of the box
        x3 = int(x1 + 5)
        y3 = int(y1 - 10)
        font = cv2.FONT_HERSHEY_SIMPLEX
        # im.shape[1] is the image width, im.shape[0] the height
        if (x3 <= im.shape[1]) and (y3 >= 0):
            cv2.putText(im, str(r[i][0]), (x3, y3), font, 1, (0, 255, 0), 2)
        else:
            # the label would fall outside the image, so draw it inside the box
            cv2.putText(im, str(r[i][0]), (int(x1), int(y1 + 6)), font, 1, (0, 255, 0), 2)
        #***********
        # write back so the next iteration draws on top of this result;
        # this is a method that works well.
        cv2.imwrite(out_img, im)
    cv2.imshow('yolo_image_detector', cv2.imread(out_img))
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    
if __name__ == "__main__":
    net = load_net("/home/username/darknet/cfg/yolov2-tiny.cfg", "/home/username/darknet/weights/yolov2-tiny.weights", 0)
    meta = load_meta("/home/username/darknet/cfg/coco.data")
    #origin_img = "/home/username/darknet/data/copy_dog.jpg"
    out_img = "/home/username/darknet/data/test_result.jpg"
    video_tmp = "/home/username/darknet/data/video_tmp.jpg"

    # make a video object and init it; 0 is the default webcam device
    cap = cv2.VideoCapture(0)
    # scale-down ratio applied to every frame
    scaling_factor = 0.5
    count = 0
    # loop until 'Esc' or 'q' is pressed
    while True:
        # grab the current frame; ret is True if a frame was captured
        ret, frame = cap.read()
        if not ret:
            break
        # resize the frame
        frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
        count = count + 1
        # detect and show once every 5 frames
        if count == 5:
            count = 0
            # OpenCV frames are BGR; convert to RGB before handing them to PIL
            img_arr = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            img_arr.save(video_tmp)
            r = detect(net, meta, video_tmp)
            #print r
            for j in range(len(r)):
                print r[j][0], ' : ', int(100*r[j][1]), "%"
                print r[j][2]
            print ''
            print '#-----------------------------------#'
            # display rectangles around the detected objects in a window
            showPicResult(video_tmp)
        else:
            continue
        # wait 1 ms per iteration; press Esc or 'q' to leave the loop
        c = cv2.waitKey(1)
        if (c == 27) or ((c & 0xFF) == ord('q')):
            break
    # release the camera and close the display window
    cap.release()
    cv2.destroyAllWindows()
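After saving the file, run it with Python 2 from the darknet root, e.g. `python python/my_webcam_darknet.py` (this assumes libdarknet.so has already been built there and that you replaced the /home/username paths with your own). A detection window should refresh roughly once every 5 frames with the boxes drawn on it.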

The cfg and weights files in the previous posts were yolov3, which was very slow. In this post I switched to yolov2-tiny, which is much faster, an efficiency gain of at least 10x.
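To measure the difference yourself, a minimal timing sketch (my own addition; the image path is just an example, any local picture works) can wrap a single detect() call:

    import time

    test_img = "/home/username/darknet/data/dog.jpg"  # any local test picture
    start = time.time()
    r = detect(net, meta, test_img)
    # wall-clock time of one complete detection
    print 'one detect() call took', time.time() - start, 'seconds'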

3. Code explanation

    # make a video object and init it; 0 is the default webcam device
    cap = cv2.VideoCapture(0)
    # scale-down ratio applied to every frame
    scaling_factor = 0.5
    count = 0
    # loop until 'Esc' or 'q' is pressed
    while True:
        # grab the current frame; ret is True if a frame was captured
        ret, frame = cap.read()
        if not ret:
            break
        # resize the frame
        frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
        count = count + 1
        # detect and show once every 5 frames
        if count == 5:
            count = 0
            # OpenCV frames are BGR; convert to RGB before handing them to PIL
            img_arr = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            img_arr.save(video_tmp)
            r = detect(net, meta, video_tmp)
            #print r
            for j in range(len(r)):
                print r[j][0], ' : ', int(100*r[j][1]), "%"
                print r[j][2]
            print ''
            print '#-----------------------------------#'
            # display rectangles around the detected objects in a window
            showPicResult(video_tmp)
        else:
            continue
        # wait 1 ms per iteration; press Esc or 'q' to leave the loop
        c = cv2.waitKey(1)
        if (c == 27) or ((c & 0xFF) == ord('q')):
            break
    # release the camera and close the display window
    cap.release()
    cv2.destroyAllWindows()

The code is basically the same as in the previous post; the only change is reading from the webcam instead of a local video file. The default webcam device index here is "0". If you use a different camera, look up its device index yourself and adjust the code accordingly. (A Kinect (Xbox 360) requires its proprietary driver to be installed.)

    cap = cv2.VideoCapture(0)
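If you are unsure which index your camera sits at, a small probe sketch (my own addition; trying 4 indices is an arbitrary assumption) can list the devices that open successfully:

    import cv2

    # try device indices 0-3 and report which ones can be opened
    for idx in range(4):
        cap = cv2.VideoCapture(idx)
        if cap.isOpened():
            print 'device', idx, 'is available'
        cap.release()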

As before, this code skips frames (yolo runs once every 5 frames). Even with yolov2-tiny, detection cannot keep up with the frame rate, but the result is already much improved, and once every 5 frames is acceptable. In practice you rarely need to detect every frame anyway: adjacent frames are very similar, so checking one or two of them is enough. (The downside of skipping is that the frame you do detect may happen to be blurry; a small flaw.)
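The reset-to-zero counter above works fine; an equivalent variant (a sketch of my own, with a hypothetical DETECT_EVERY constant) uses the modulo operator, so changing the skip interval becomes a one-line edit:

    DETECT_EVERY = 5  # run yolo once every N frames
    frame_id = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_id = frame_id + 1
        if frame_id % DETECT_EVERY == 0:
            # save the frame and call detect() exactly as in the main code
            pass
        if cv2.waitKey(1) == 27:  # Esc quits
            break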

While the program runs, press "q", "Esc", or Ctrl+C to stop it. Often a single press of "q" or "Esc" has no effect and the program will not stop until you press several times: because of the continue branch above, cv2.waitKey(1) only runs on detection frames, so key presses are polled just once every 5 frames and are easy to miss. (If anyone has a cleaner solution, please share! I will keep improving this myself and post an update if I fix it; one possible fix is sketched below.)
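A minimal sketch of that fix (my own assumption about the cause, not tested on every setup): poll the keyboard on every pass through the loop rather than only after a detection, and mask waitKey's return value before comparing it with ord('q'):

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # ... resize / count / detect exactly as in the main code ...

        # poll the keyboard on EVERY iteration, not only on detection frames
        c = cv2.waitKey(1) & 0xFF
        if c == 27 or c == ord('q'):
            break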


Afterword:

The program is easy to understand and runs without problems on my own machine. If you run into any issues, criticism and corrections are welcome!

(PS: the next post will cover "a Python implementation of yolo combined with ROS for video detection".)

Contact email: 2052383522@qq.com

