RK3588: video type detection + super-resolution

This script classifies a video as live-action or cartoon with a YOLOv5 detector on the RK3588 NPU, then streams it through the matching RKNN super-resolution model using a multi-threaded pool.

import os
import urllib
import traceback
import time
import sys
import numpy as np
import cv2

import platform
from rknnlite.api import RKNNLite


from rknnpool import rknnPoolExecutor
from func import myFunc

# device-tree compatible node for RK356x/RK3588
DEVICE_COMPATIBLE_NODE = '/proc/device-tree/compatible'

RK3588_RKNN_MODEL = '/home/tyzc/RK_NPU_SDK_1.5.0/rknn-toolkit2/examples/onnx/yolov5/yolov5s_640_640_cut.rknn'
IMG_PATH = '/home/tyzc/RK_NPU_SDK_1.5.0/rknn-toolkit2/examples/onnx/yolov5/bus.jpg'
DATASET = '/home/tyzc/RK_NPU_SDK_1.5.0/rknn-toolkit2/examples/onnx/yolov5/dataset.txt'


OBJ_THRESH = 0.25
NMS_THRESH = 0.45
IMG_SIZE_HEIGHT = 640
IMG_SIZE_WIDTH = 640
IMG_SIZE = 640
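
# OBJ_THRESH gates both the objectness score and the per-class probability in
# filter_boxes(); NMS_THRESH is the IoU cutoff used by nms_boxes().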

CLASSES = ("person", "cartoon")
video_path = "/home/tyzc/0416_syl_chaofen/rknn-multi-threaded-Super-Resolution-syl/cartorn/1怀旧动漫社聪明的一休_360_640.mp4"
cap = cv2.VideoCapture(video_path)
modelPath = "/home/tyzc/0416_syl_chaofen/rknn-multi-threaded-Super-Resolution-syl/rknnModel/32_1_16_4channel_123conv_1rdb1_net_oneresize_no_convhr_360_320_20240407_36.0778.rknn"
modelPath_1 = "/home/tyzc/0416_syl_chaofen/rknn-multi-threaded-Super-Resolution-syl/rknnModel/mix_0925/mix_32_1_16_4channel_123conv_1rdb1_net_oneresize_no_convhr_360_320_31.15..rknn"
# modelPath_1 = "/home/tyzc/0416_syl_chaofen/rknn-multi-threaded-Super-Resolution-syl/rknnModel/32_1_16_4channel_123conv_1rdb1_net_oneresize_no_convhr_360_320_20240407_36.0778.rknn"

width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print("视频分辨率:{} x {}".format(height, width))
# modelPath = "/home/tyzc/Downloads/rknn-toolkit2-master/rknn-multi-threaded-nosigmoid/rknnModel/8_1_4/8_1_4_net_g_160000_Y_360_640_doqu.rknn"
# Number of worker threads; increasing it can raise the frame rate
TPEs = 6


# Initialize the RKNN thread pools
pool = rknnPoolExecutor(
    rknnModel=modelPath,
    TPEs=TPEs,
    func=myFunc)
pool_1 = rknnPoolExecutor(
    rknnModel=modelPath_1,
    TPEs=TPEs,
    func=myFunc)
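# `pool` loads the cartoon-tuned SR model and `pool_1` the mixed/live-action
# one (an inference from the model file names above).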

def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def xywh2xyxy(x):
    # Convert [x, y, w, h] to [x1, y1, x2, y2]
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y


def process(input, mask, anchors):

    anchors = [anchors[i] for i in mask]
    grid_h, grid_w = map(int, input.shape[0:2])

    box_confidence = sigmoid(input[..., 4])
    box_confidence = np.expand_dims(box_confidence, axis=-1)

    box_class_probs = sigmoid(input[..., 5:])

    box_xy = sigmoid(input[..., :2])*2 - 0.5

    col = np.tile(np.arange(0, grid_w), grid_h).reshape(-1, grid_w)
    row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_w)
    col = col.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
    row = row.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
    grid = np.concatenate((col, row), axis=-1)
    box_xy += grid
    box_xy *= int(IMG_SIZE/grid_h)

    box_wh = pow(sigmoid(input[..., 2:4])*2, 2)
    box_wh = box_wh * anchors

    box = np.concatenate((box_xy, box_wh), axis=-1)

    return box, box_confidence, box_class_probs
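
# Box decoding performed above (standard YOLOv5 v5.x):
#   xy = (2 * sigmoid(t_xy) - 0.5 + grid) * stride,  stride = IMG_SIZE / grid_h
#   wh = (2 * sigmoid(t_wh)) ** 2 * anchor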


def filter_boxes(boxes, box_confidences, box_class_probs):
    """Filter boxes with box threshold. It's a bit different with origin yolov5 post process!

    # Arguments
        boxes: ndarray, boxes of objects.
        box_confidences: ndarray, confidences of objects.
        box_class_probs: ndarray, class_probs of objects.

    # Returns
        boxes: ndarray, filtered boxes.
        classes: ndarray, classes for boxes.
        scores: ndarray, scores for boxes.
    """
    boxes = boxes.reshape(-1, 4)
    box_confidences = box_confidences.reshape(-1)
    box_class_probs = box_class_probs.reshape(-1, box_class_probs.shape[-1])

    _box_pos = np.where(box_confidences >= OBJ_THRESH)
    boxes = boxes[_box_pos]
    box_confidences = box_confidences[_box_pos]
    box_class_probs = box_class_probs[_box_pos]

    class_max_score = np.max(box_class_probs, axis=-1)
    classes = np.argmax(box_class_probs, axis=-1)
    _class_pos = np.where(class_max_score >= OBJ_THRESH)

    boxes = boxes[_class_pos]
    classes = classes[_class_pos]
    scores = (class_max_score* box_confidences)[_class_pos]

    return boxes, classes, scores


def nms_boxes(boxes, scores):
    """Suppress non-maximal boxes.

    # Arguments
        boxes: ndarray, boxes of objects.
        scores: ndarray, scores of objects.

    # Returns
        keep: ndarray, index of effective boxes.
    """
    x = boxes[:, 0]
    y = boxes[:, 1]
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]

    areas = w * h
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)

        xx1 = np.maximum(x[i], x[order[1:]])
        yy1 = np.maximum(y[i], y[order[1:]])
        xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
        yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])

        w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
        h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
        inter = w1 * h1

        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        inds = np.where(ovr <= NMS_THRESH)[0]
        order = order[inds + 1]
    keep = np.array(keep)
    return keep
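
# Worked example with hypothetical boxes (not from the model):
#   nms_boxes(np.array([[0., 0., 10., 10.], [1., 1., 11., 11.]]),
#             np.array([0.9, 0.8]))
# gives IoU ≈ 81 / 119 ≈ 0.68 for the pair; since 0.68 > NMS_THRESH (0.45),
# the lower-scoring box is suppressed and only index 0 is kept.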


def yolov5_post_process(input_data):
    masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
               [59, 119], [116, 90], [156, 198], [373, 326]]

    boxes, classes, scores = [], [], []
    for input, mask in zip(input_data, masks):
        b, c, s = process(input, mask, anchors)
        b, c, s = filter_boxes(b, c, s)
        boxes.append(b)
        classes.append(c)
        scores.append(s)

    boxes = np.concatenate(boxes)
    boxes = xywh2xyxy(boxes)
    classes = np.concatenate(classes)
    scores = np.concatenate(scores)

    nboxes, nclasses, nscores = [], [], []
    for c in set(classes):
        inds = np.where(classes == c)
        b = boxes[inds]
        c = classes[inds]
        s = scores[inds]

        keep = nms_boxes(b, s)

        nboxes.append(b[keep])
        nclasses.append(c[keep])
        nscores.append(s[keep])

    if not nclasses and not nscores:
        return None, None, None

    boxes = np.concatenate(nboxes)
    classes = np.concatenate(nclasses)
    scores = np.concatenate(nscores)

    return boxes, classes, scores
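
# yolov5_post_process expects a list of three feature maps shaped
# (H, W, 3, 5 + num_classes), with H = W in {80, 40, 20} for a 640x640
# input; see the reshape/transpose in __main__ below.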


def draw(image, boxes, scores, classes):
    """Draw the boxes on the image.

    # Argument:
        image: original image.
        boxes: ndarray, boxes of objects.
        classes: ndarray, classes of objects.
        scores: ndarray, scores of objects.
    """
    for box, score, cl in zip(boxes, scores, classes):
        left, top, right, bottom = box
        print('class: {}, score: {}'.format(CLASSES[cl], score))
        print('box coordinate left,top,right,bottom: [{}, {}, {}, {}]'.format(left, top, right, bottom))
        left = int(left)
        top = int(top)
        right = int(right)
        bottom = int(bottom)

        cv2.rectangle(image, (left, top), (right, bottom), (255, 0, 0), 2)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
                    (left, top - 6),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 0, 255), 2)


def letterbox(im, new_shape=(640, 640), color=(0, 0, 0)):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im, ratio, (dw, dh)
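
# Note: letterbox() pads to preserve aspect ratio, e.g.
#   img, ratio, (dw, dh) = letterbox(frame, new_shape=(IMG_SIZE, IMG_SIZE))
# The main loop below currently uses a plain cv2.resize instead (see the
# commented-out letterbox call), which stretches the frame.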


def get_host():
    # get platform and device type
    system = platform.system()
    machine = platform.machine()
    os_machine = system + '-' + machine
    if os_machine == 'Linux-aarch64':
        try:
            with open(DEVICE_COMPATIBLE_NODE) as f:
                device_compatible_str = f.read()
                if 'rk3588' in device_compatible_str:
                    host = 'RK3588'
                elif 'rk3562' in device_compatible_str:
                    host = 'RK3562'
                else:
                    host = 'RK3566_RK3568'
        except IOError:
            print('Read device node {} failed.'.format(DEVICE_COMPATIBLE_NODE))
            exit(-1)
    else:
        host = os_machine
    return host


if __name__ == '__main__':
    
    # Get device information
    host_name = get_host()
    if host_name == 'RK3588':
        rknn_model = RK3588_RKNN_MODEL
    else:
        print("This demo cannot run on the current platform: {}".format(host_name))
        exit(-1)

    rknn_lite = RKNNLite()
    # Load RKNN model
    print('--> Load RKNN model')
    ret = rknn_lite.load_rknn(rknn_model)
    if ret != 0:
        print('Load RKNN model failed')
        exit(ret)
    print('done')
    # Init runtime environment
    print('--> Init runtime environment')
    # On RK356x/RK3588 with a Debian OS there is no need to specify a target.
    if host_name == 'RK3588':
        # For RK3588, specify which NPU core the model runs on through the core_mask parameter.
        ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0_1_2)
        # ret = rknn_lite.init_runtime()
    else:
        ret = rknn_lite.init_runtime()
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')



    frames, loopTime, initTime = 0, time.time(), time.time()
    all_read_time = 0 
    all_show_time = 0
    person_num = 0
    cartoon_num = 0
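    # Two phases: (1) detect person vs. cartoon with YOLOv5 until more than
    # 10 detections accumulate; (2) release the detector and stream half of
    # each frame through the matching super-resolution pool, showing a
    # side-by-side comparison.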
    while cap.isOpened():
        if person_num + cartoon_num <= 10:
            real_time_start = time.time()
            frames += 1
            ret, frame = cap.read()
            if not ret:
                break
            frame_h, frame_w, c = frame.shape

            # Set inputs
            img = frame
            # img, ratio, (dw, dh) = letterbox(img, new_shape=(IMG_SIZE, IMG_SIZE))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))

            # Inference
            print('--> Running model')
            # outputs = rknn_lite.inference(inputs=[img], data_format=['nchw'])
            inference_time_start = time.time()
            outputs = rknn_lite.inference(inputs=[img])
            inference_time_end = time.time()
            print("-" * 60)
            print(img.shape)
            print(inference_time_end - inference_time_start)

            # post process
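            # Each head comes out as (1, 3*(5+nc), H, W); reshape to
            # (3, 5+nc, H, W), then transpose to (H, W, 3, 5+nc) as
            # expected by yolov5_post_process().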
            input0_data = outputs[0]
            input1_data = outputs[1]
            input2_data = outputs[2]

            input0_data = input0_data.reshape([3, -1]+list(input0_data.shape[-2:]))
            input1_data = input1_data.reshape([3, -1]+list(input1_data.shape[-2:]))
            input2_data = input2_data.reshape([3, -1]+list(input2_data.shape[-2:]))

            input_data = list()
            input_data.append(np.transpose(input0_data, (2, 3, 0, 1)))
            input_data.append(np.transpose(input1_data, (2, 3, 0, 1)))
            input_data.append(np.transpose(input2_data, (2, 3, 0, 1)))

            boxes, classes, scores = yolov5_post_process(input_data)

            img_1 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            # Count detections for class 0 (person) and class 1 (cartoon)
            if boxes is not None:
                count_0 = np.count_nonzero(classes == 0)
                count_1 = np.count_nonzero(classes == 1)
                person_num = person_num + count_0
                cartoon_num = cartoon_num + count_1
                draw(img_1, boxes, scores, classes)
                # cv2.imwrite('result.jpg', img_1)
            # Overlay the running counts and show the frame
            show_time_1 = time.time()

            # Text overlay settings
            text = "person_num: " + str(person_num) + "  " + 'cartoon_num: ' + str(cartoon_num)
            org = (50, 100)  # text anchor (x, y)
            font = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 1.0
            color = (0, 0, 255)  # red in BGR
            thickness = 2

            frame_resize_show = cv2.resize(frame, (frame.shape[1] * 2, frame.shape[0] * 2))
            # Draw the counts on the frame
            cv2.putText(frame_resize_show, text, org, font, font_scale, color, thickness)

            cv2.imshow('test', frame_resize_show)
            show_time_2 = time.time()
            if cv2.waitKey(1) & 0xFF == ord('q'):
                # out.release()
                break
            print("person_num: " + str(person_num) + "  " + 'cartoon_num: ' + str(cartoon_num))
            real_time_end = time.time()
            print("-"*24)
            print(real_time_end - real_time_start) 
        else:
            rknn_lite.release()
            if cartoon_num > person_num:
                # Prime the pool with the first TPEs + 1 frames for
                # asynchronous processing
                pool_frames = []
                if cap.isOpened():
                    for i in range(TPEs + 1):
                        ret, frame = cap.read()
                        if not ret:
                            cap.release()
                            del pool
                            exit(-1)
                        # Split the 640-wide frame: the left half is
                        # plain-resized for comparison, the right half goes
                        # through the SR model
                        frame_resize = frame[:, :320]
                        frame_cf = frame[:, 320:]
                        pool_frames.append(frame_resize)
                        pool.put(frame_cf)
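                # pool_frames and the pool's output queue advance in lockstep:
                # each pool.get() below is paired with the oldest buffered
                # plain-resized half via pool_frames.pop(0).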
                while True:
                    frames += 1
                    ret, frame = cap.read()
                    if not ret:
                        break  
                    frame_resize = frame[:, :320]
                    frame_cf = frame[:, 320:]
                    read_time_2 = time.time()
            
                    if frames % 100000000 == 0:
                        # Effectively disabled branch: plain 2x upscale
                        frame = cv2.resize(frame, (frame.shape[1] * 2, frame.shape[0] * 2))
                    else:
                        pool.put(frame_cf)
                        pool_frames.append(frame_resize)
                        frame_cf, flag = pool.get()
                        frame_resize = cv2.resize(pool_frames.pop(0), (frame.shape[1], frame.shape[0] * 2))
                        # Blank canvas at 2x the input size
                        result_frame = np.zeros((height*2, width*2, 3), dtype=np.uint8)
                        # Plain-resized half on the left
                        result_frame[:, :width] = frame_resize
                        # Super-resolved half on the right
                        result_frame[:, width:] = frame_cf
                        # 1-pixel white divider between the halves
                        result_frame[:, width:width+1, :] = (255, 255, 255)
                        frame = result_frame
                        if not flag:
                            break
                        show_time_1 = time.time()
                        # Text overlay settings
                        text = "is_cartoon"
                        org = (50, 100)  # text anchor (x, y)
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        font_scale = 1.0
                        color = (0, 0, 255)  # red in BGR
                        thickness = 2

                        # Draw the label on the frame
                        cv2.putText(frame, text, org, font, font_scale, color, thickness)
                        cv2.imshow('test', frame)
                        show_time_2 = time.time()
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            # out.release()
                            break

                        if frames % 30 == 0:
                            print("Average FPS over last 30 frames:\t", 30 / (time.time() - loopTime - all_read_time - all_show_time))
                            loopTime = time.time()
                            all_read_time = 0
                            all_show_time = 0
                break  # playback finished; leave the outer loop

            elif person_num > cartoon_num:
                # Prime the pool with the first TPEs + 1 frames for
                # asynchronous processing
                pool_frames = []
                if cap.isOpened():
                    for i in range(TPEs + 1):
                        ret, frame = cap.read()
                        if not ret:
                            cap.release()
                            del pool_1
                            exit(-1)
                        frame_resize = frame[:, :320]
                        frame_cf = frame[:, 320:]
                        pool_frames.append(frame_resize)
                        pool_1.put(frame_cf)
                while True:
                    frames += 1
                    ret, frame = cap.read()
                    if not ret:
                        break  

                    frame_resize = frame[:, :320]
                    frame_cf = frame[:, 320:]
                    read_time_2 = time.time()
            
                    if frames % 100000000 == 0:
                        # Effectively disabled branch: plain 2x upscale
                        frame = cv2.resize(frame, (frame.shape[1] * 2, frame.shape[0] * 2))
                    else:
                        pool_1.put(frame_cf)
                        pool_frames.append(frame_resize)
                        frame_cf, flag = pool_1.get()
                        frame_resize = cv2.resize(pool_frames.pop(0), (frame.shape[1], frame.shape[0] * 2))
                        # Blank canvas at 2x the input size
                        result_frame = np.zeros((height*2, width*2, 3), dtype=np.uint8)
                        # Plain-resized half on the left
                        result_frame[:, :width] = frame_resize
                        # Super-resolved half on the right
                        result_frame[:, width:] = frame_cf
                        # 1-pixel white divider between the halves
                        result_frame[:, width:width+1, :] = (255, 255, 255)
                        frame = result_frame
                        if not flag:
                            break
                        show_time_1 = time.time()
                        # Text overlay settings
                        text = "is_person"
                        org = (50, 100)  # text anchor (x, y)
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        font_scale = 1.0
                        color = (0, 0, 255)  # red in BGR
                        thickness = 2
                        # Draw the label on the frame
                        cv2.putText(frame, text, org, font, font_scale, color, thickness)
                        cv2.imshow('test', frame)
                        show_time_2 = time.time()
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            # out.release()
                            break

                        if frames % 30 == 0:
                            print("Average FPS over last 30 frames:\t", 30 / (time.time() - loopTime - all_read_time - all_show_time))
                            loopTime = time.time()
                            all_read_time = 0
                            all_show_time = 0
                break  # playback finished; leave the outer loop
    print("总平均帧率\t", frames / (time.time() - initTime))
    # 释放cap和rknn线程池
    cap.release()
    # out.release()
    cv2.destroyAllWindows()
    pool_1.release()
    pool.release()
