An Aidlux-Based Intelligent Early-Warning Solution for Autonomous Driving

The intelligent warning system covers three tasks:

object detection, drivable-area detection, and lane-line detection

Drivable-area detection: identifies the regions the vehicle can drive in, providing path-planning assistance for autonomous driving.

Lane-line detection: an environment-perception application whose goal is to detect lane lines with an on-board camera or LiDAR.
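
All three tasks are handled here by a single YOLOP model exported to ONNX (weights/yolop-640-640.onnx): one forward pass produces a detection head plus two segmentation heads. The minimal sketch below uses the model path and output names from the full script; the fixed 640x640 input size is an assumption based on the file name.

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("weights/yolop-640-640.onnx")
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)       # NCHW dummy input
det_out, da_seg_out, ll_seg_out = session.run(
    ['det_out', 'drive_area_seg', 'lane_line_seg'],        # detection / drivable area / lane lines
    input_feed={"images": dummy})
print(det_out.shape, da_seg_out.shape, ll_seg_out.shape)   # (1, n, 6), (1, 2, 640, 640), (1, 2, 640, 640)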

The intelligent warning code in forewarming.py is as follows:

import cv2
import time
import torch
import numpy as np
import onnxruntime as ort
from PIL import Image, ImageDraw, ImageFont
from lib.core.general import non_max_suppression

onnx_path = "weights/yolop-640-640.onnx"


def resize_unscale(img, new_shape=(640, 640), color=114):
    # Letterbox resize: scale the image to fit new_shape while keeping its
    # aspect ratio, then pad the borders with a constant gray value.
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    canvas = np.zeros((new_shape[0], new_shape[1], 3))
    canvas.fill(color)
    # Scale ratio (new / old) new_shape(h,w)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # w,h
    new_unpad_w = new_unpad[0]
    new_unpad_h = new_unpad[1]
    pad_w, pad_h = new_shape[1] - new_unpad_w, new_shape[0] - new_unpad_h  # wh padding

    dw = pad_w // 2  # divide padding into 2 sides
    dh = pad_h // 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_AREA)

    canvas[dh:dh + new_unpad_h, dw:dw + new_unpad_w, :] = img

    return canvas, r, dw, dh, new_unpad_w, new_unpad_h  # (dw,dh)


def cv2AddChineseText(img, text, position, textColor=(0, 0, 255), textSize=10):
    # Draw Chinese text on an OpenCV image via PIL (cv2.putText cannot render
    # Chinese characters).
    if isinstance(img, np.ndarray):  # convert the OpenCV (BGR) image to a PIL image
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # create a drawing handle for the image
    draw = ImageDraw.Draw(img)
    # font settings: SimSun supports Chinese characters
    fontStyle = ImageFont.truetype(
        "simsun.ttc", textSize, encoding="utf-8")
    # draw the text
    draw.text(position, text, textColor, font=fontStyle)
    # convert back to OpenCV (BGR) format
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)


def infer(ori_img, img, r, dw, dh, new_unpad_w, new_unpad_h):
    # NOTE: the ONNX Runtime session is re-created on every call; for real-time
    # inference it is better to create it once and reuse it across frames.
    ort_session = ort.InferenceSession(onnx_path)
    t0 = time.time()
    # inference: (1,n,6) (1,2,640,640) (1,2,640,640)
    det_out, da_seg_out, ll_seg_out = ort_session.run(
        ['det_out', 'drive_area_seg', 'lane_line_seg'],
        input_feed={"images": img}
    )
    seconds = time.time() - t0
    fps = "%.2f fps" %(1 / seconds)  # 帧率

    det_out = torch.from_numpy(det_out).float()
    boxes = non_max_suppression(det_out)[0]  # [n,6] [x1,y1,x2,y2,conf,cls]
    boxes = boxes.cpu().numpy().astype(np.float32)
    if boxes.shape[0] == 0:
        print("no bounding boxes detected.")
        # return a 4-tuple so the caller can unpack it safely
        return None, None, None, None
    
    # scale coords to original size.
    boxes[:, 0] -= dw
    boxes[:, 1] -= dh
    boxes[:, 2] -= dw
    boxes[:, 3] -= dh
    boxes[:, :4] /= r
    print(f"detect {boxes.shape[0]} bounding boxes.")

    img_det = ori_img[:, :, ::-1].copy()
    for i in range(boxes.shape[0]):
        x1, y1, x2, y2, conf, label = boxes[i]
        x1, y1, x2, y2, label = int(x1), int(y1), int(x2), int(y2), int(label)
        img_det = cv2.rectangle(img_det, (x1, y1), (x2, y2), (0, 255, 0), 2, 2)

    # select da & ll segment area.
    da_seg_out = da_seg_out[:, :, dh:dh + new_unpad_h, dw:dw + new_unpad_w]
    ll_seg_out = ll_seg_out[:, :, dh:dh + new_unpad_h, dw:dw + new_unpad_w]

    da_seg_mask = np.argmax(da_seg_out, axis=1)[0]
    ll_seg_mask = np.argmax(ll_seg_out, axis=1)[0]

    color_area = np.zeros((new_unpad_h, new_unpad_w, 3), dtype=np.uint8)
    color_area[da_seg_mask == 1] = [0, 255, 0]
    color_area[ll_seg_mask == 1] = [0, 0, 255]
    color_seg = color_area

    return img_det, boxes, color_seg, fps


def main(source, save_path):
    cap = cv2.VideoCapture(source)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))      # video width
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))    # video height
    fps = cap.get(cv2.CAP_PROP_FPS)                     # video frame rate
    fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))          # video codec (FOURCC)
    # create the output video writer
    writer = cv2.VideoWriter(save_path, fourcc, fps, (width, height))

    # check that the video opened successfully
    if not cap.isOpened():
        print("Failed to open the video")
        exit()

    frame_id = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            print("Video inference finished...")
            break

        frame_id += 1
        # if frame_id % 3 != 0:
        #     continue
        canvas, r, dw, dh, new_unpad_w, new_unpad_h = resize_unscale(frame, (640, 640))
        img = canvas.copy().astype(np.float32)  # (640, 640, 3)
        # scale to [0, 1], then normalize with the ImageNet mean/std per channel
        img /= 255.0
        img[:, :, 0] -= 0.485
        img[:, :, 1] -= 0.456
        img[:, :, 2] -= 0.406
        img[:, :, 0] /= 0.229
        img[:, :, 1] /= 0.224
        img[:, :, 2] /= 0.225
        img = img.transpose(2, 0, 1)   # HWC -> CHW
        img = np.expand_dims(img, 0)   # (1, 3, 640, 640)

        # run detection + segmentation inference
        img_det, boxes, color_seg, fps = infer(frame, img, r, dw, dh, new_unpad_w, new_unpad_h)
        if img_det is None:
            continue

        color_mask = np.mean(color_seg, 2)
        img_merge = canvas[dh:dh + new_unpad_h, dw:dw + new_unpad_w, :]

        # blend the color masks into the frame, then resize back to the
        # original video resolution
        img_merge[color_mask != 0] = \
            img_merge[color_mask != 0] * 0.5 + color_seg[color_mask != 0] * 0.5
        img_merge = img_merge.astype(np.uint8)
        img_merge = cv2.resize(img_merge, (width, height),
                               interpolation=cv2.INTER_LINEAR)

        # overlay (in Chinese): frame index, inference FPS and vehicle count,
        # plus a legend: green = drivable area, red = detected lane lines
        img_merge = cv2AddChineseText(img_merge, f'帧数:{frame_id}  帧率:{fps} 前方共有 {boxes.shape[0]} 辆车...',
                                      (100, 50), textColor=(0, 0, 255), textSize=30)
        img_merge = cv2AddChineseText(img_merge, '前方绿色区域为可行驶区域,红色为检出的车道线...',
                                      (100, 100), textColor=(0, 0, 255), textSize=30)

        for i in range(boxes.shape[0]):
            x1, y1, x2, y2, conf, label = boxes[i]
            x1, y1, x2, y2, label = int(x1), int(y1), int(x2), int(y2), int(label)
            img_merge = cv2.rectangle(img_merge, (x1, y1), (x2, y2), (0, 255, 0), 2, 2)

        # cv2.imshow('img_merge', img_merge)
        # cv2.waitKey(0)

        writer.write(img_merge)

    cap.release()          # release the video capture
    writer.release()       # release the video writer and flush the output file
    cv2.destroyAllWindows()



if __name__=="__main__":
    source = 'inference/videos/1.mp4'
    save_path = 'inference/output/output.mp4'
    main(source, save_path)
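
Before running the script, it can be helpful to confirm that the ONNX model really exposes the input and output names used in infer(). The short check below is not part of forewarming.py; it simply prints the model's I/O metadata with onnxruntime:

import onnxruntime as ort

session = ort.InferenceSession("weights/yolop-640-640.onnx")
for inp in session.get_inputs():
    print("input :", inp.name, inp.shape)    # the script expects "images", (1, 3, 640, 640)
for out in session.get_outputs():
    print("output:", out.name, out.shape)    # expects det_out, drive_area_seg, lane_line_seg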

A video of the code implementation process is shown below:

Intelligent warning deployment and application on Aidlux
