Using YOLOv3 through Darknet's Python interface

Reposted from:

  1. YOLOv3: using Darknet's Python interface on single images, video files, and camera streams
  2. YOLOv3 Python interface usage

Preparation

Downloading the project

git clone https://github.com/pjreddie/darknet
cd darknet
#some configuration is needed before make; see the end of my previous post
#(https://blog.csdn.net/qq_20241587/article/details/111176541)
make
 
#download the weights file (roughly 200+ MB) and put it in the darknet folder
wget https://pjreddie.com/media/files/yolov3.weights
 
./darknet detect cfg/yolov3.cfg yolov3.weights data/dog.jpg #uses the first GPU by default
./darknet -nogpu detect cfg/yolov3.cfg yolov3.weights data/dog.jpg #-nogpu: run on the CPU
 
#predictions.jpg in the darknet folder is the detection result

1 Processing a single image

Goal: given the path of an input image and a path for the annotated output, run the model, draw the detection boxes, and save the result as a new image.

from ctypes import *
import math
import random
import cv2
import os


def sample(probs):
    s = sum(probs)
    probs = [a / s for a in probs]
    r = random.uniform(0, 1)
    for i in range(len(probs)):
        r = r - probs[i]
        if r <= 0:
            return i
    return len(probs) - 1


def c_array(ctype, values):
    arr = (ctype * len(values))()
    arr[:] = values
    return arr


class BOX(Structure):
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]


class DETECTION(Structure):
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]


class IMAGE(Structure):
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]


class METADATA(Structure):
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]


lib = CDLL("/home/xxx/darknet/libdarknet.so", RTLD_GLOBAL)
# lib = CDLL("libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int

predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)

set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]

make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)

make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)

free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]

reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]

load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p

do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

free_image = lib.free_image
free_image.argtypes = [IMAGE]

letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE

load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA

load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE

rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]

predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)


# net_d = load_net(b"../cfg/yolov3.cfg", b"../yolov3.weights", 0)
# meta_d = load_meta(b"../cfg/coco.data")


def classify(net, meta, im):
    out = predict_image(net, im)
    res = []
    for i in range(meta.classes):
        res.append((meta.names[i], out[i]))
    res = sorted(res, key=lambda x: -x[1])
    return res


def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    if nms:
        do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    free_image(im)
    free_detections(dets, num)
    return res


def detect_and_boxing(net, meta, b_path, raw_path, save_path,
                      color=(0, 255, 255), line_type=1):
    image = cv2.imread(raw_path)
    r = detect(net, meta, b_path)
    if not len(r) > 0:
        print("nothing detected in this picture!")
    else:
        for i in range(len(r)):
            box_i = r[i]
            label_i = box_i[0]
            prob_i = box_i[1]
            x_ = box_i[2][0]
            y_ = box_i[2][1]
            w_ = box_i[2][2]
            h_ = box_i[2][3]
            text_ = str(label_i) + "," + str(round(prob_i, 3))

            cv2.rectangle(image, (int(x_ - w_ / 2), int(y_ - h_ / 2)),
                          (int(x_ + w_ / 2), int(y_ + h_ / 2)),
                          color, line_type)
            cv2.putText(image, text_, (int(x_ - w_ / 2 - 5), int(y_ - h_ / 2 - 5)), cv2.FONT_HERSHEY_DUPLEX, 0.7, color,
                        2)
            cv2.imwrite(save_path, image)
            print("boxing ", i, " found ", label_i, "with prob = ", prob_i, ", finished!")
            print("box position is :", box_i[2])


if __name__ == "__main__":
    # net = load_net("cfg/densenet201.cfg", "/home/pjreddie/trained/densenet201.weights", 0)
    # im = load_image("data/wolf.jpg", 0, 0)
    # meta = load_meta("cfg/imagenet1k.data")
    # r = classify(net, meta, im)
    # print(r)

    net = load_net(b"/home/xxx/darknet/cfg/yolov3.cfg", b"/home/xxx/darknet/yolov3.weights", 0)
    meta = load_meta(b"/home/xxx/darknet/cfg/coco.data")
    b_path = b"/home/xxx/darknet/data/dog.jpg"
    raw_path = "/home/xxx/darknet/data/dog.jpg"
    save_path = "/home/xxx/darknet/predictdemo.jpg"
    detect_and_boxing(net, meta, b_path=b_path, raw_path=raw_path, save_path=save_path)

Key changes:

# lib = CDLL("libdarknet.so", RTLD_GLOBAL): change this to the absolute path of libdarknet.so in your own project
cv2.rectangle draws the box and cv2.putText draws the label; to keep the text from overlapping the box I add a small offset.

Each YOLOv3 detection is unpacked as:

label_i = box_i[0]  # class label
prob_i = box_i[1]  # confidence for that label
x_ = box_i[2][0]
y_ = box_i[2][1]
w_ = box_i[2][2]
h_ = box_i[2][3]  # bbox (x, y, w, h): the box center plus its width and height, in pixels of the original image

cv2.rectangle(image, (int(x_ - w_ / 2), int(y_ - h_ / 2)),
(int(x_ + w_ / 2), int(y_ + h_ / 2)),
color, line_type)
cv2.putText(image, text_, (int(x_ - w_ / 2 - 5), int(y_ - h_ / 2 - 5)), cv2.FONT_HERSHEY_DUPLEX, 0.7, color,
2)
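Note that x_ - w_ / 2 can come out slightly negative when a detection touches the image border. If you also want to crop the detections, a small helper (my own sketch, not part of the original script) that turns the (center_x, center_y, width, height) tuple into clamped integer corners may be handy:

def box_to_corners(x, y, w, h, img_w, img_h):
    # convert the (center_x, center_y, width, height) box returned by detect()
    # into top-left / bottom-right corners clipped to the image bounds
    x1 = max(int(x - w / 2), 0)
    y1 = max(int(y - h / 2), 0)
    x2 = min(int(x + w / 2), img_w - 1)
    y2 = min(int(y + h / 2), img_h - 1)
    return (x1, y1), (x2, y2)

# usage, with image = cv2.imread(raw_path):
# p1, p2 = box_to_corners(x_, y_, w_, h_, image.shape[1], image.shape[0])
# cv2.rectangle(image, p1, p2, color, line_type)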

2 Video files

Goal: given the path of a source video and output locations for the new video and the intermediate frames, extract the frames, run the model on each one, draw the boxes, and save the annotated frames.

The original author re-assembles the annotated frames into a video inside the script; I would suggest doing that with OpenCV or ffmpeg instead.
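If the annotated frames keep the numeric names produced below (1.jpg, 2.jpg, ...), ffmpeg can stitch them together with a single command. A rough sketch, assuming ffmpeg is installed; the frame rate and output path are placeholders to adjust to your own source:

ffmpeg -framerate 25 -i /home/xxx/darknet/output/pics_new/%d.jpg -c:v libx264 -pix_fmt yuv420p /home/xxx/darknet/output/sample_video_new.mp4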

from ctypes import *
import math
import random
import cv2
import os


def sample(probs):
    s = sum(probs)
    probs = [a / s for a in probs]
    r = random.uniform(0, 1)
    for i in range(len(probs)):
        r = r - probs[i]
        if r <= 0:
            return i
    return len(probs) - 1


def c_array(ctype, values):
    arr = (ctype * len(values))()
    arr[:] = values
    return arr


class BOX(Structure):
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]


class DETECTION(Structure):
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]


class IMAGE(Structure):
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]


class METADATA(Structure):
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]


lib = CDLL("/home/xxx/darknet/libdarknet.so", RTLD_GLOBAL)
# lib = CDLL("libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int

predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)

set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]

make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)

make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)

free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]

reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]

load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p

do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

free_image = lib.free_image
free_image.argtypes = [IMAGE]

letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE

load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA

load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE

rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]

predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)

net_d = load_net(b"/home/xxx/darknet/cfg/yolov3.cfg", b"/home/xxx/darknet/yolov3.weights", 0)
meta_d = load_meta(b"/home/xxx/darknet/cfg/coco.data")


def classify(net, meta, im):
    out = predict_image(net, im)
    res = []
    for i in range(meta.classes):
        res.append((meta.names[i], out[i]))
    res = sorted(res, key=lambda x: -x[1])
    return res


def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    if nms:
        do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    free_image(im)
    free_detections(dets, num)
    return res


# draw all detected boxes on the image and save it to save_path
def detect_and_boxing_default(b_path, raw_path, save_path,
                              color=(0, 255, 255), line_type=1):
    print("checking pic...", raw_path)
    image = cv2.imread(raw_path)
    r = detect(net_d, meta_d, b_path)
    if not len(r) > 0:
        print("nothing detected in this picture!")
    else:
        print(len(r), " stuff detected in this picture! boxing...")
        print("going to save as :", save_path)
        for i in range(len(r)):
            box_i = r[i]
            label_i = box_i[0]
            prob_i = box_i[1]
            x_ = box_i[2][0]
            y_ = box_i[2][1]
            w_ = box_i[2][2]
            h_ = box_i[2][3]
            text_ = str(label_i) + "," + str(round(prob_i, 3))

            cv2.rectangle(image, (int(x_ - w_ / 2), int(y_ - h_ / 2)),
                          (int(x_ + w_ / 2), int(y_ + h_ / 2)),
                          color, line_type)
            cv2.putText(image, text_, (int(x_ - w_ / 2 - 5), int(y_ - h_ / 2 - 5)), cv2.FONT_HERSHEY_DUPLEX, 0.7, color,
                        2)
            cv2.imwrite(save_path, image)


def video_to_pics(video_path='/home/jiantang/work_data/sample_video.avi',
                  video_out_path='/home/jiantang/work_data/'):
    print("video_to_pics start...")
    vc = cv2.VideoCapture(video_path)
    c = 1
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        print('open error!')
        rval = False
    count_c = 1
    while rval:
        rval, frame = vc.read()
        if rval:
            print("dealing with frame : ", count_c)
            cv2.imwrite(video_out_path + str(int(c)) + '.jpg', frame)
        c += 1
        cv2.waitKey(1)
        count_c += 1
    vc.release()
    print("video_to_pics finished...")


def pics_boxing(pics_path, save_path):
    raw_save_path = save_path
    print("pics_boxing start...")
    print("checking path : ", pics_path)
    pics_names = os.listdir(pics_path)
    print("found pics num :", len(pics_names))
    count_c = 1
    for name in pics_names:
        print("dealing with pics ", count_c)
        raw_path = pics_path + "/" + name
        b_path = bytes(raw_path, encoding="utf8")
        save_path = raw_save_path + "/" + name
        detect_and_boxing_default(b_path, raw_path, save_path)
        count_c += 1
    print("pics_boxing finished...")


def pics_to_video(pics_path, video_new_path='/home/jiantang/work_data/sample_video_new.avi', ):
    print("pics_to_video start...")
    print("checking files in :", pics_path)
    file_list = os.listdir(pics_path)
    # keep only .jpg files and strip the .jpg extension
    tmp_jpg = []
    for name in file_list:
        if not name.endswith('.jpg'):
            print("found sth called:", name, ", skip it.")
            continue  # skipping is enough; removing from file_list while iterating would skip the next entry
        tmp_jpg.append(name.replace(".jpg", ""))
    # sort names
    tmp_jpg.sort(key=int)

    fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')  # I420 codec, for .avi output
    # cap_fps is the frame rate and can be set freely; size must match the frames.
    # Note that img.shape returns (height, width, channels) while VideoWriter expects
    # size as (width, height); if they are swapped the output will not open (players
    # report that the multiplexed stream cannot be decoded). For a common 1080p frame
    # img.shape is (1080, 1920, 3), so size should be (1920, 1080).
    cap_fps = 50
    size = (1920, 1080)
    # configure the video writer
    video = cv2.VideoWriter(video_new_path, fourcc, cap_fps, size)
    # video.write expects color frames; cv2.imread returns BGR color images, so they can be written directly
    for name in tmp_jpg:
        img_E = cv2.imread(pics_path + "/" + name + ".jpg")
        print("reading....")
        video.write(img_E)
    video.release()
    print("pics_to_video finished...")


video_path = '/home/xxx/darknet/car.mp4'
video_out_path = '/home/xxx/darknet/output/pics/'
video_out_dir = '/home/xxx/darknet/output/pics'
video_out_new_path = '/home/xxx/darknet/output/pics_new'
video_new_path = '/home/xxx/darknet/output/pics_new/sample_video_new.avi'

video_to_pics(video_path, video_out_path)
pics_boxing(video_out_dir, video_out_new_path)
# pics_to_video(video_out_new_path, video_new_path)
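As the comments above note, hard-coding size = (1920, 1080) only works for a 1080p source. A variant of pics_to_video (my own sketch, assuming the annotated frames keep the numeric names written by video_to_pics) that reads the frame size from the first frame and the frame rate from the source video:

def pics_to_video_auto(pics_path, video_new_path, src_video_path):
    # take the frame rate from the source video instead of hard-coding cap_fps
    cap = cv2.VideoCapture(src_video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 25  # fall back to 25 if the property is unavailable
    cap.release()

    names = sorted((n for n in os.listdir(pics_path) if n.endswith('.jpg')),
                   key=lambda n: int(n[:-4]))
    first = cv2.imread(pics_path + "/" + names[0])
    h, w = first.shape[:2]  # img.shape is (height, width, channels)

    fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')
    video = cv2.VideoWriter(video_new_path, fourcc, fps, (w, h))  # size is (width, height)
    for n in names:
        video.write(cv2.imread(pics_path + "/" + n))
    video.release()

# pics_to_video_auto(video_out_new_path, video_new_path, video_path)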

3 RTSP streams

The Darknet source from the YOLO website ships with an example Python wrapper, darknet.py.
It needs a few small modifications before it can be used here.

if __name__ == "__main__":
    net = load_net(b"/home/xxx/darknet/cfg/yolov3.cfg", b"/home/xxx/darknet/yolov3.weights", 0)
    meta = load_meta(b"/home/xxx/darknet/cfg/coco.data")
    r = detect(net, meta, b"/home/xxx/darknet/data/dog.jpg")
    print(r)

Look at the definition of the detect function:

def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    if nms:
        do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    free_image(im)
    free_detections(dets, num)
    return res

The third argument to detect, image, is a string, i.e. a file path; load_image is then called to load the picture into an im object (note that im is Darknet's own image struct, defined in include/darknet.h; a fully commented version of that file has been shared in the group), and the rest of the processing works on it. So when rewriting the interface we can simply change the third argument to take an image object directly. Make the following changes:

  1. Define the following helper function in darknet.py:
def nparray_to_image(img):
    data = img.ctypes.data_as(POINTER(c_ubyte))
    image = ndarray_image(data, img.ctypes.shape, img.ctypes.strides)
    return image
  2. Add the following lines to darknet.py, anywhere near the other similar ctypes declarations:
ndarray_image = lib.ndarray_to_image
ndarray_image.argtypes = [POINTER(c_ubyte), POINTER(c_long), POINTER(c_long)]
ndarray_image.restype = IMAGE
  3. Add the following code to src/image.c; I placed the new part right after the OPENCV block, but the exact position is flexible:
int show_image(image p, const char *name, int ms)
{
#ifdef OPENCV
    int c = show_image_cv(p, name, ms);
    return c;
#else
    fprintf(stderr, "Not compiled with OpenCV, saving to %s.png instead\n", name);
    save_image(p, name);
    return -1;
#endif
}

//modify by liu: newly added part
#ifdef NUMPY
image ndarray_to_image(unsigned char* src, long* shape, long* strides)
{
    int h = shape[0];
    int w = shape[1];
    int c = shape[2];
    int step_h = strides[0];
    int step_w = strides[1];
    int step_c = strides[2];
    image im = make_image(w, h, c);
    int i, j, k;
    int index1, index2 = 0;
    for(i = 0; i < h; ++i){
        for(k= 0; k < c; ++k){
            for(j = 0; j < w; ++j){

                index1 = k*w*h + i*w + j;
                index2 = step_h*i + step_w*j + step_c*k;
                //fprintf(stderr, "w=%d h=%d c=%d step_w=%d step_h=%d step_c=%d \n", w, h, c, step_w, step_h, step_c);
                //fprintf(stderr, "im.data[%d]=%u data[%d]=%f \n", index1, src[index2], index2, src[index2]/255.);
                im.data[index1] = src[index2]/255.;
            }
        }
    }

    rgbgr_image(im);

    return im;
}
#endif
  4. Add the following after line 19 of src/image.h:
#ifdef NUMPY
image ndarray_to_image(unsigned char* src, long* shape, long* strides);
#endif
  5. Add the following after line 47 of the Makefile:
ifeq ($(NUMPY), 1) 
COMMON+= -DNUMPY -I/usr/include/python2.7/ -I/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/
CFLAGS+= -DNUMPY
endif

Also add NUMPY=1 to the switches at the top of the Makefile, so that they become:

GPU=1
CUDNN=1
OPENCV=1
OPENMP=0
NUMPY=1
DEBUG=0
  6. Rebuild: make clean, then make.

  7. Configuration is complete; the adapted script follows.

from ctypes import *
import random
import cv2


def sample(probs):
    s = sum(probs)
    probs = [a / s for a in probs]
    r = random.uniform(0, 1)
    for i in range(len(probs)):
        r = r - probs[i]
        if r <= 0:
            return i
    return len(probs) - 1


def c_array(ctype, values):
    arr = (ctype * len(values))()
    arr[:] = values
    return arr


class BOX(Structure):
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]


class DETECTION(Structure):
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]


class IMAGE(Structure):
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]


class METADATA(Structure):
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]


lib = CDLL("/home/huadian/darknet/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int

predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)

set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]

make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

ndarray_image = lib.ndarray_to_image
ndarray_image.argtypes = [POINTER(c_ubyte), POINTER(c_long), POINTER(c_long)]
ndarray_image.restype = IMAGE

get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)

make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)

free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]

reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]

load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p

do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

free_image = lib.free_image
free_image.argtypes = [IMAGE]

letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE

load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA

load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE

rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]

# modify by liu
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)


def classify(net, meta, im):
    out = predict_image(net, im)
    res = []
    for i in range(meta.classes):
        res.append((meta.names[i], out[i]))
    res = sorted(res, key=lambda x: -x[1])
    return res


def detect(net, meta, im, thresh=.5, hier_thresh=.5, nms=.45):
    # im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    if nms:
        do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    free_image(im)
    free_detections(dets, num)
    return res

# modify by liu
def nparray_to_image(img):
    data = img.ctypes.data_as(POINTER(c_ubyte))
    image = ndarray_image(data, img.ctypes.shape, img.ctypes.strides)
    return image


# draw all detected boxes on the image and save it to save_path
def detect_and_boxing(net, meta, raw_path, save_path,
                      color=(0, 255, 255), line_type=1):
    image = cv2.imread(raw_path)
    im = nparray_to_image(image)  # detect in this script expects a Darknet image, not a path
    r = detect(net, meta, im)
    if not len(r) > 0:
        print("nothing detected in this picture!")
    else:
        for i in range(len(r)):
            box_i = r[i]
            label_i = box_i[0]
            prob_i = box_i[1]
            x_ = box_i[2][0]
            y_ = box_i[2][1]
            w_ = box_i[2][2]
            h_ = box_i[2][3]
            text_ = str(label_i) + "," + str(round(prob_i, 3))

            cv2.rectangle(image, (int(x_ - w_ / 2), int(y_ - h_ / 2)),
                          (int(x_ + w_ / 2), int(y_ + h_ / 2)),
                          color, line_type)
            cv2.putText(image, text_, (int(x_ - w_ / 2 - 5), int(y_ - h_ / 2 - 5)), cv2.FONT_HERSHEY_DUPLEX, 0.7, color,
                        2)
            cv2.imwrite(save_path, image)
            print("boxing ", i, " found ", label_i, "with prob = ", prob_i, ", finished!")
            print("box position is :", box_i[2])


if __name__ == "__main__":
    net = load_net(b"/home/xxx/darknet/cfg/yolov3.cfg", b"/home/huadian/darknet/yolov3.weights", 0)
    meta = load_meta(b"/home/xxx/darknet/cfg/coco.data")
    # open the RTSP stream from the camera
    vid = cv2.VideoCapture('rtsp://admin:liupeng199188@192.168.12.100/Streaming/Channels/1801')
    
    while True:
        return_value, arr = vid.read()
        im = nparray_to_image(arr)
        r = detect(net, meta, im)
        print(r)
        print("this frame ok!")
    # image = cv2.imread("/home/xxx/darknet/data/dog.jpg")
    # image = nparray_to_image(image)
    # r = detect(net, meta, image)
    # print(r)
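The while loop above never checks the read result and has no way to stop. A fuller loop (my own sketch, assuming Python 3 where the meta.names entries come back as bytes, and a machine with a display for cv2.imshow) that draws the detections on each frame and quits on the q key:

while True:
    return_value, arr = vid.read()
    if not return_value:  # stream ended or the read failed
        break
    im = nparray_to_image(arr)
    for label, prob, (x, y, w, h) in detect(net, meta, im):
        p1 = (int(x - w / 2), int(y - h / 2))
        p2 = (int(x + w / 2), int(y + h / 2))
        cv2.rectangle(arr, p1, p2, (0, 255, 255), 1)
        cv2.putText(arr, "%s %.2f" % (label.decode(), prob), (p1[0], p1[1] - 5),
                    cv2.FONT_HERSHEY_DUPLEX, 0.7, (0, 255, 255), 2)
    cv2.imshow("detections", arr)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
vid.release()
cv2.destroyAllWindows()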
