
(py37) root@orangepi5plus:/home/orangepi# pip install rknn_toolkit_lite2-1.6.0-cp39-cp39-linux_aarch64.whl
Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple
ERROR: rknn_toolkit_lite2-1.6.0-cp39-cp39-linux_aarch64.whl is not a supported wheel on this platform.
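The cp39 tag in the wheel's filename means it targets CPython 3.9, while the active conda env (py37) runs Python 3.7, so pip refuses it. A minimal fix sketch, assuming conda is available (otherwise fetch a cp37 build of the wheel for this release, if one exists):

(py37) root@orangepi5plus:/home/orangepi# conda create -n py39 python=3.9
(py37) root@orangepi5plus:/home/orangepi# conda activate py39
(py39) root@orangepi5plus:/home/orangepi# pip install rknn_toolkit_lite2-1.6.0-cp39-cp39-linux_aarch64.whl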
 

WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
 

    def forward(self, x):
        """Processes input through YOLOv5 layers, altering shape for detection: `x(bs, 3, ny, nx, 85)`."""
        z = []  # inference output
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)

                if isinstance(self, Segment):  # (boxes + masks)
                    xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
                    xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
                else:  # Detect (boxes only)
                    xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf), 4)
                z.append(y.view(bs, self.na * nx * ny, self.no))

        return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
    shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape
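For NPU deployment the inference branch above (grid offsets, anchor scaling, torch.cat) is usually cut out of the exported graph. A minimal sketch of an export-time Detect.forward in the spirit of the Rockchip yolov5 examples (an assumption, not this project's file verbatim) returns the three raw conv maps and leaves decoding to yolov5_post_process on the CPU, which matches the reshape/transpose seen in the demo code further down:

    def forward(self, x):
        """Export-time forward for RKNN: return raw per-branch conv outputs."""
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv only; output stays (bs, 255, ny, nx)
        return x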
--> Load RKNN model
--> Init runtime environment
I RKNN: [19:08:31.281] RKNN Runtime Information: librknnrt version: 1.4.0 (a10f100eb@2022-09-09T09:07:14)
I RKNN: [19:08:31.281] RKNN Driver Information: version: 0.9.3
I RKNN: [19:08:31.282] RKNN Model Information: version: 1, toolkit version: 1.4.0-22dcfef4(compiler version: 1.4.0 (3b4520e4f@2022-09-05T20:52:35)), target: RKNPU lite, target platform: rk3566, framework name: ONNX, framework layout: NCHW
E RKNN: [19:08:31.282] failed to check rknpu hardware version: 0x46495245
E RKNN: [19:08:31.282] This rknn model is for RK3566, but current platform is RK3588
E RKNN: [19:08:31.282] rknn_init, load model failed!
E Catch exception when init runtime!
E Traceback (most recent call last):
  File "/root/miniconda3/envs/py37/lib/python3.7/site-packages/rknnlite/api/rknn_lite.py", line 148, in init_runtime
    self.rknn_runtime.build_graph(self.rknn_data, self.load_model_in_npu)
  File "rknnlite/api/rknn_runtime.py", line 840, in rknnlite.api.rknn_runtime.RKNNRuntime.build_graph
Exception: RKNN init failed. error code: RKNN_ERR_FAIL

Init runtime environment failed!
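The model was converted for rk3566 but the board's SoC is an RK3588, so rknn_init rejects it. The fix is to re-convert on the PC side with the matching target platform; a minimal conversion sketch with rknn-toolkit2 (file names, mean/std values and the quantization dataset are placeholders, not taken from this post):

from rknn.api import RKNN

rknn = RKNN()
# target_platform must match the board the .rknn file will run on
rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform='rk3588')
rknn.load_onnx(model='yolov5s.onnx')
rknn.build(do_quantization=True, dataset='./dataset.txt')
rknn.export_rknn('yolov5s_rk3588.rknn')
rknn.release()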
(py37) root@orangepi5plus:~/Desktop/1# python deploy.py
--> Load RKNN model
--> Init runtime environment
I RKNN: [19:19:47.577] RKNN Runtime Information: librknnrt version: 1.4.0 (a10f100eb@2022-09-09T09:07:14)
I RKNN: [19:19:47.577] RKNN Driver Information: version: 0.9.3
I RKNN: [19:19:47.578] RKNN Model Information: version: 1, toolkit version: 1.4.0-22dcfef4(compiler version: 1.4.0 (3b4520e4f@2022-09-05T20:52:35)), target: RKNPU v2, target platform: rk3588, framework name: ONNX, framework layout: NCHW
done
E RKNN: [19:19:53.997] failed to submit!, op id: 1, op name: Conv:Conv_0, flags: 0x5, task start: 1565, task number: 24, run task counter: 0, int status: 0
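This "failed to submit" on the very first Conv op appeared while all three NPU cores were pinned with core_mask=RKNNLite.NPU_CORE_0_1_2 on the 1.4.0 runtime. Whether the multi-core mask is the trigger is an assumption, but a cheap check is to fall back to automatic scheduling or a single core:

# workaround sketch: let the runtime pick a core instead of pinning all three
ret = rknn.init_runtime(core_mask=RKNNLite.NPU_CORE_AUTO)
if ret != 0:
    ret = rknn.init_runtime(core_mask=RKNNLite.NPU_CORE_0)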

I RKNN: [19:52:47.502] RKNN Runtime Information: librknnrt version: 1.4.0 (a10f100eb@2022-09-09T09:07:14)
I RKNN: [19:52:47.502] RKNN Driver Information: version: 0.9.3
I RKNN: [19:52:47.503] RKNN Model Information: version: 1, toolkit version: 1.4.0-22dcfef4(compiler version: 1.4.0 (3b4520e4f@2022-09-05T20:52:35)), target: RKNPU v2, target platform: rk3588, framework name: ONNX, framework layout: NCHW
done
class: person, score: 0.8242685794830322
box coordinate left,top,right,down: [40.25714874267578, 237.35836744308472, 196.25714874267578, 538.5347599983215]
class: person, score: 0.821617066860199
box coordinate left,top,right,down: [174.68169045448303, 243.873877286911, 273.0628468990326, 504.6404049396515]
class: person, score: 0.6177416443824768
box coordinate left,top,right,down: [-0.14884090423583984, 326.7828435897827, 68.03468990325928, 522.2842950820923]
class: person, score: 0.43042677640914917
box coordinate left,top,right,down: [536.4923242330551, 203.18724250793457, 637.4496923685074, 522.7547740936279]
class: bus , score: 0.19925792515277863
box coordinate left,top,right,down: [3.378205358982086, 132.60259491205215, 640.2980641722679, 433.29669708013535]
class: train, score: 0.23023268580436707
box coordinate left,top,right,down: [1.4123240113258362, 141.0, 638.3321828246117, 467.0]
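The overlapping bus (0.20) and train (0.23) boxes are low-confidence duplicates over the same region; raising the confidence threshold in the post-processing filters them out. In the Rockchip demo the knobs are named OBJ_THRESH and NMS_THRESH (an assumption for this particular script):

OBJ_THRESH = 0.3   # boxes below this score (like the 0.20 bus / 0.23 train) are dropped
NMS_THRESH = 0.45  # IoU threshold for non-max suppression of overlapping boxes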
(py37) root@orangepi5plus:~/Desktop/1# python demo.py
--> Load RKNN model
--> Init runtime environment
I RKNN: [21:03:30.525] RKNN Runtime Information: librknnrt version: 1.4.0 (a10f100eb@2022-09-09T09:07:14)
I RKNN: [21:03:30.525] RKNN Driver Information: version: 0.9.3
I RKNN: [21:03:30.526] RKNN Model Information: version: 1, toolkit version: 1.4.0-22dcfef4(compiler version: 1.4.0 (3b4520e4f@2022-09-05T20:52:35)), target: RKNPU v2, target platform: rk3588, framework name: ONNX, framework layout: NCHW
done
Segmentation fault (core dumped)
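A segfault inside librknnrt is often an input the runtime did not expect (wrong shape or dtype) or a librknnrt/driver version mismatch rather than a Python-level bug; that diagnosis is an assumption here. A defensive sketch that checks the frame before handing it to the NPU (img, IMG_SIZE and rknn as in the scripts below):

assert img.dtype == np.uint8, f'unexpected dtype {img.dtype}'
assert img.shape == (IMG_SIZE, IMG_SIZE, 3), f'unexpected shape {img.shape}'
outputs = rknn.inference(inputs=[img])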
import datetime as dt  # needed for dt.datetime.utcnow() below
import cv2
import numpy as np

# IMG_SIZE, the rknn (RKNNLite) handle, and the yolov5_post_process / draw helpers are
# assumed to be defined earlier in the script, as in the Rockchip yolov5 demo
video_file_path = "2.mp4"
cap = cv2.VideoCapture(video_file_path)
 
# Check if camera opened successfully
if not cap.isOpened():
  print("Error opening video stream or file")
 
# Read until video is completed
while(cap.isOpened()):
    start = dt.datetime.utcnow()
    # Capture frame-by-frame
    ret, img = cap.read()
    if not ret:
        break
    
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
    input_img = np.expand_dims(img, 0)  # add batch dim for the NPU; keep img 3-D for cvtColor below
    
    # Inference
    #print('--> Running model')
    outputs = rknn.inference(inputs=[input_img])
    #print('done')
 
    # post process: each head is (1, 255, H, W); split 255 into 3 anchors x 85 channels
    input0_data = outputs[0]
    input1_data = outputs[1]
    input2_data = outputs[2]

    input0_data = input0_data.reshape([3, -1] + list(input0_data.shape[-2:]))
    input1_data = input1_data.reshape([3, -1] + list(input1_data.shape[-2:]))
    input2_data = input2_data.reshape([3, -1] + list(input2_data.shape[-2:]))

    input_data = list()
    input_data.append(np.transpose(input0_data, (2, 3, 0, 1)))  # -> (H, W, 3, 85) for yolov5_post_process
    input_data.append(np.transpose(input1_data, (2, 3, 0, 1)))
    input_data.append(np.transpose(input2_data, (2, 3, 0, 1)))
 
    boxes, classes, scores = yolov5_post_process(input_data)
    duration = dt.datetime.utcnow() - start
    fps = round(1 / duration.total_seconds())  # .microseconds alone is wrong for frames slower than 1 s
 
    # draw process result and fps
    img_1 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.putText(img_1, f'fps: {fps}',
            (20, 20),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.6, (0, 125, 125), 2)
    if boxes is not None:
        draw(img_1, boxes, scores, classes, fps)
 
    # show output
    cv2.imshow("post process result", img_1)
 
    # Press Q on keyboard to exit
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
 
# When everything is done, release the video capture object
cap.release()
 
# Closes all the frames
cv2.destroyAllWindows()
import datetime as dt
import cv2
import numpy as np
from rknnlite.api import RKNNLite

# RKNN_MODEL, IMG_SIZE and the yolov5_post_process / draw helpers are assumed to be
# defined above this block, as in the Rockchip yolov5 demo

if __name__ == '__main__':

    rknn = RKNNLite()
    
    # load RKNN model
    print('--> Load RKNN model')
    ret = rknn.load_rknn(RKNN_MODEL)
    if ret != 0:
        print('Load RKNN model failed!')
        exit(ret)
    
    # Init runtime environment
    print('--> Init runtime environment')
    # use NPU core 0 1 2
    ret = rknn.init_runtime(core_mask=RKNNLite.NPU_CORE_0_1_2)
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')
    
    # Create a VideoCapture object and read from input file
    # If the input is the camera, pass 0 instead of the video file name
    cap = cv2.VideoCapture(0)
    
    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")
    
    # Read until video is completed
    while (cap.isOpened()):
        start = dt.datetime.utcnow()
        # Capture frame-by-frame
        ret, img = cap.read()
        if not ret:
            break
    
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
    
        # Inference
        print('--> Running model')
        outputs = rknn.inference(inputs=[img])
        print('done')
    
        # post process
        input0_data = outputs[0]
        input1_data = outputs[1]
        input2_data = outputs[2]
    
        input0_data = input0_data.reshape([3, -1] + list(input0_data.shape[-2:]))
        input1_data = input1_data.reshape([3, -1] + list(input1_data.shape[-2:]))
        input2_data = input2_data.reshape([3, -1] + list(input2_data.shape[-2:]))
    
        input_data = list()
        input_data.append(np.transpose(input0_data, (2, 3, 0, 1)))
        input_data.append(np.transpose(input1_data, (2, 3, 0, 1)))
        input_data.append(np.transpose(input2_data, (2, 3, 0, 1)))
    
        boxes, classes, scores = yolov5_post_process(input_data)
        duration = dt.datetime.utcnow() - start
        fps = round(1 / duration.total_seconds())  # was 10000000 (10x too high); also robust past 1 s
    
        # draw process result and fps
        img_1 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.putText(img_1, f'fps: {fps}',
                    (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 125, 125), 2)
        if boxes is not None:
            draw(img_1, boxes, scores, classes, fps)
    
        # show output
        cv2.imshow("post process result", img_1)
    
        # Press Q on keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    
    # When everything is done, release the video capture object
    cap.release()
    
    # Closes all the frames
    cv2.destroyAllWindows()
E RKNN: [10:09:58.144] failed to submit!, op id: 1, op name: Conv:Conv_0, flags: 0x5, task start: 1565, task number: 24, run task counter: 0, int status: 0
done
# Initialize variables for FPS calculation
fps_sum = 0
frame_count = 0

# Read until video is completed
while (cap.isOpened()):
    start = dt.datetime.utcnow()
    # Capture frame-by-frame
    ret, img = cap.read()
    if not ret:
        break

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))

    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[img])
    print('done')

    # post process
    input0_data = outputs[0]
    input1_data = outputs[1]
    input2_data = outputs[2]

    input0_data = input0_data.reshape([3, -1] + list(input0_data.shape[-2:]))
    input1_data = input1_data.reshape([3, -1] + list(input1_data.shape[-2:]))
    input2_data = input2_data.reshape([3, -1] + list(input2_data.shape[-2:]))

    input_data = list()
    input_data.append(np.transpose(input0_data, (2, 3, 0, 1)))
    input_data.append(np.transpose(input1_data, (2, 3, 0, 1)))
    input_data.append(np.transpose(input2_data, (2, 3, 0, 1)))

    boxes, classes, scores = yolov5_post_process(input_data)
    duration = dt.datetime.utcnow() - start
    fps = round(1 / duration.total_seconds())  # was 10000000 (10x too high); also robust past 1 s
    fps_sum += fps
    frame_count += 1

    # draw process result and fps
    img_1 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.putText(img_1, f'fps: {fps}',
                (20, 20),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6, (0, 125, 125), 2)
    if boxes is not None:
        draw(img_1, boxes, scores, classes, fps)

    # show output
    cv2.imshow("post process result", img_1)

    # Press Q on keyboard to exit
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

# Calculate average FPS
if frame_count > 0:
    average_fps = fps_sum / frame_count
    print(f'Average FPS: {average_fps}')

# When everything is done, release the video capture object
cap.release()

# Closes all the frames
cv2.destroyAllWindows()
# full script with average-FPS reporting; imports and the RKNN_MODEL / IMG_SIZE
# definitions are the same as in the script above
if __name__ == '__main__':

    rknn = RKNNLite()

    # load RKNN model
    print('--> Load RKNN model')
    ret = rknn.load_rknn(RKNN_MODEL)
    if ret != 0:
        print('Load RKNN model failed!')
        exit(ret)

    # Init runtime environment
    print('--> Init runtime environment')
    # use NPU core 0 1 2
    ret = rknn.init_runtime(core_mask=RKNNLite.NPU_CORE_0_1_2)
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')

    # Create a VideoCapture object and read from input file
    # If the input is the camera, pass 0 instead of the video file name
    cap = cv2.VideoCapture(0)

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    # Initialize variables for FPS calculation
    fps_sum = 0
    frame_count = 0

    # Read until video is completed
    while (cap.isOpened()):
        start = dt.datetime.utcnow()
        # Capture frame-by-frame
        ret, img = cap.read()
        if not ret:
            break

        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))

        # Inference
        print('--> Running model')
        outputs = rknn.inference(inputs=[img])
        print('done')

        # post process
        input0_data = outputs[0]
        input1_data = outputs[1]
        input2_data = outputs[2]

        input0_data = input0_data.reshape([3, -1] + list(input0_data.shape[-2:]))
        input1_data = input1_data.reshape([3, -1] + list(input1_data.shape[-2:]))
        input2_data = input2_data.reshape([3, -1] + list(input2_data.shape[-2:]))

        input_data = list()
        input_data.append(np.transpose(input0_data, (2, 3, 0, 1)))
        input_data.append(np.transpose(input1_data, (2, 3, 0, 1)))
        input_data.append(np.transpose(input2_data, (2, 3, 0, 1)))

        boxes, classes, scores = yolov5_post_process(input_data)
        duration = dt.datetime.utcnow() - start
        fps = round(1 / duration.total_seconds())  # was 10000000 (10x too high); also robust past 1 s
        fps_sum += fps
        frame_count += 1

        # draw process result and fps
        img_1 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.putText(img_1, f'fps: {fps}',
                    (20, 20),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 125, 125), 2)
        if boxes is not None:
            draw(img_1, boxes, scores, classes, fps)

        # show output
        cv2.imshow("post process result", img_1)

        # Press Q on keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    # Calculate average FPS
    if frame_count > 0:
        average_fps = fps_sum / frame_count
        print(f'Average FPS: {average_fps}')

    # When everything is done, release the video capture object
    cap.release()

    # Closes all the frames
    cv2.destroyAllWindows()
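One caveat on the averaging above: taking the mean of instantaneous per-frame FPS values over-weights fast frames. A steadier sketch counts frames against total wall time (time.monotonic is in the standard library):

import time

def report_average_fps(frame_count, t0):
    """Average FPS over total elapsed time, not a mean of instantaneous values."""
    elapsed = time.monotonic() - t0
    if frame_count > 0 and elapsed > 0:
        print(f'Average FPS: {frame_count / elapsed:.1f}')

Record t0 = time.monotonic() before the while loop, count frames inside it, and call this after cap.release().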
