A Study on Object (Vehicle) Vibration (Engine-Off) Detection



1 Analyzing vehicle vibration and light/shadow changes in the video data

1.1 Edge detection via binary thresholding

import cv2
import numpy as np

np.set_printoptions(threshold=np.inf)
# threshold: total number of array elements to print before truncating

cap1 = cv2.VideoCapture("static.mkv")  # pass 0 to use the default webcam instead
cap2 = cv2.VideoCapture("move.mkv")    # pass 0 to use the default webcam instead
while True:
    # 1. Grab one frame from each video
    ret1, static = cap1.read()
    ret2, move = cap2.read()
    if not ret1 or not ret2:  # stop when either video runs out of frames
        break
    # Convert to grayscale
    static = cv2.cvtColor(static, cv2.COLOR_BGR2GRAY)
    move = cv2.cvtColor(move, cv2.COLOR_BGR2GRAY)
    # Fixed threshold
    threshold = 130
    # Binary thresholding
    ret1, static = cv2.threshold(static, threshold, 255, cv2.THRESH_BINARY)
    ret2, move = cv2.threshold(move, threshold, 255, cv2.THRESH_BINARY)
    cv2.imshow('static', static)
    cv2.imshow('move', move)
    # Press 'q' to stop
    if cv2.waitKey(1) & 0xFF == ord('q'):  # poll for a key press every 1 ms
        break
cap1.release()
cap2.release()
cv2.destroyAllWindows()

(Figure: binary-thresholded frames from the static and moving videos)

1.2 Converting to HSV to inspect brightness and related changes

(Figures: HSV-converted frames of the static and moving videos, used to inspect brightness changes)
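The HSV script itself was not kept in the post; below is a minimal sketch of what the inspection could look like, reusing the static.mkv / move.mkv files from section 1.1 and comparing the mean of the V (brightness) channel per frame. The per-frame printout is an illustrative addition, not the original code.

import cv2
import numpy as np

cap1 = cv2.VideoCapture("static.mkv")  # engine-off video (file name assumed from section 1.1)
cap2 = cv2.VideoCapture("move.mkv")    # engine-running video

while True:
    ret1, static = cap1.read()
    ret2, move = cap2.read()
    if not ret1 or not ret2:
        break
    # Convert BGR -> HSV; channel 2 is V (brightness)
    static_v = cv2.cvtColor(static, cv2.COLOR_BGR2HSV)[:, :, 2]
    move_v = cv2.cvtColor(move, cv2.COLOR_BGR2HSV)[:, :, 2]
    # Compare the mean brightness of the two videos frame by frame
    print("mean V  static: {:.2f}  move: {:.2f}".format(np.mean(static_v), np.mean(move_v)))
    cv2.imshow('static V', static_v)
    cv2.imshow('move V', move_v)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap1.release()
cap2.release()
cv2.destroyAllWindows()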


2 Analysis and summary:

From the front view, the vibration of a running vehicle is barely distinguishable from a stationary one. Setting the vibration itself aside, the color and light/shadow changes it causes are also not obvious. Moreover, across frames the camera footage contains widespread slight noise, which essentially masks whatever light/shadow change the vehicle's vibration produces.
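One way to make the noise observation concrete (not part of the original experiment) is to average the absolute difference between consecutive grayscale frames of each video; if sensor noise dominates, the engine-on and engine-off values come out nearly the same:

import cv2
import numpy as np

def mean_frame_diff(path):
    """Average absolute per-pixel change between consecutive grayscale frames."""
    cap = cv2.VideoCapture(path)
    prev = None
    diffs = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if prev is not None:
            diffs.append(np.mean(cv2.absdiff(gray, prev)))
        prev = gray
    cap.release()
    return float(np.mean(diffs)) if diffs else 0.0

# If camera noise dominates, these two numbers will be nearly identical
print("static:", mean_frame_diff("static.mkv"))
print("move:  ", mean_frame_diff("move.mkv"))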

Detecting engine-off from the front view via vehicle vibration and brightness changes is not practical. It is recommended to film the rear of the vehicle instead and detect exhaust-pipe vibration or exhaust smoke.

A rear-view video was filmed today: the overall rear vibration is not obvious, the exhaust pipe's vibration is not obvious, and the exhaust gas is essentially transparent, so it cannot be detected.


3 Result:

Detecting engine-off of the vehicle in the video: unsuccessful!


4 Takeaways from the study: the vehicle can be detected and segmented

Vehicle detection in video with Faster R-CNN

import cv2
import os
import time
import torch.nn as nn
import torch
import numpy as np
import torchvision.transforms as transforms
import torchvision
from PIL import Image
from matplotlib import pyplot as plt

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

COCO_INSTANCE_CATEGORY_NAMES = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
cap = cv2.VideoCapture("move.mkv")  # pass 0 to use the default webcam instead

# Load the pretrained detector once, outside the frame loop
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()
model.to(device)

preprocess = transforms.Compose([
    transforms.ToTensor(),
])

while True:
    # 1. Grab one frame
    ret, frame = cap.read()
    if not ret:  # stop when the video ends
        break

    # 2. Preprocess: convert BGR (OpenCV) to RGB, then to a CHW float tensor in [0, 1]
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_chw = preprocess(rgb).to(device)

    # 3. Forward pass (no gradients needed for inference)
    input_list = [img_chw]
    with torch.no_grad():
        tic = time.time()
        # print("input img tensor shape:{}".format(input_list[0].shape))
        output_list = model(input_list)
        output_dict = output_list[0]
        # print("pass: {:.3f}s".format(time.time() - tic))
        # for k, v in output_dict.items():
        #     print("key:{}, value:{}".format(k, v))

    # 4. Visualization: move outputs back to the CPU
    out_boxes = output_dict["boxes"].cpu()
    out_scores = output_dict["scores"].cpu()
    out_labels = output_dict["labels"].cpu()

    num_boxes = out_boxes.shape[0]
    max_vis = 2
    thres = 0.995


    for idx in range(0, min(num_boxes, max_vis)):

        score = out_scores[idx].numpy()  # confidence score
        bbox = out_boxes[idx].numpy()    # box coordinates (x1, y1, x2, y2)
        class_name = COCO_INSTANCE_CATEGORY_NAMES[out_labels[idx]]  # class name

        if score < thres:
            continue
        x1, y1, x2, y2 = map(int, bbox)  # OpenCV drawing functions expect integer pixel coordinates
        frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
        print("Box:", (x1, y1), (x2, y2))
        box_size = str((x2 - x1, y2 - y1))  # box width/height in pixels
        frame = cv2.putText(frame, box_size, (x1, y1), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 0, 0))

    cv2.imshow('frame', frame)
    # Press 'q' to stop
    if cv2.waitKey(1) & 0xFF == ord('q'):  # poll for a key press every 1 ms
        break
cap.release()
cv2.destroyAllWindows()

(Figure: Faster R-CNN detection result on a video frame)
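The drawing loop above keeps the top-scoring boxes regardless of class. Since the target is vehicles, a hedged variant could filter by the COCO class name; the VEHICLE_CLASSES set below is my own illustrative addition, not from the original post.

    # Hypothetical variant of the drawing loop: keep only vehicle detections
    VEHICLE_CLASSES = {"car", "bus", "truck", "motorcycle"}

    for idx in range(num_boxes):
        score = float(out_scores[idx])
        class_name = COCO_INSTANCE_CATEGORY_NAMES[out_labels[idx]]
        if score < thres or class_name not in VEHICLE_CLASSES:
            continue  # skip low-confidence boxes and non-vehicle classes
        x1, y1, x2, y2 = map(int, out_boxes[idx].numpy())
        frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
        frame = cv2.putText(frame, class_name, (x1, y1), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 0, 0))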

Vehicle detection and segmentation in video with Mask R-CNN



import cv2
import os
import time
import torch.nn as nn
import torch
import numpy as np
import torchvision.transforms as transforms
import torchvision
from PIL import Image
from matplotlib import pyplot as plt
import random
# np.set_printoptions(threshold=np.inf)
# threshold: total number of array elements to print before truncating

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

COCO_INSTANCE_CATEGORY_NAMES = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
cap = cv2.VideoCapture("move.mkv")  # pass 0 to use the default webcam instead

def random_colour_masks(image):
    """Paint a binary mask with a random colour so it can be blended over the frame."""
    colours = [[0, 255, 0], [0, 0, 255], [255, 0, 0], [0, 255, 255], [255, 255, 0], [255, 0, 255], [80, 70, 180],
               [250, 80, 190], [245, 145, 50], [70, 150, 250], [50, 190, 190]]
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)
    # Fill the mask pixels with one randomly chosen colour
    r[image == 1], g[image == 1], b[image == 1] = colours[random.randrange(len(colours))]
    coloured_mask = np.stack([r, g, b], axis=2)
    return coloured_mask


# Load the pretrained Mask R-CNN once, outside the frame loop
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
model.eval()
model.to(device)

preprocess = transforms.Compose([
    transforms.ToTensor(),
])

while True:
    # 1. Grab one frame
    ret, frame = cap.read()
    if not ret:  # stop when the video ends
        break

    # 2. Preprocess: convert BGR (OpenCV) to RGB, then to a CHW float tensor in [0, 1]
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_chw = preprocess(rgb).to(device)

    # 3. Forward pass (no gradients needed for inference)
    input_list = [img_chw]
    with torch.no_grad():
        tic = time.time()
        # print("input img tensor shape:{}".format(input_list[0].shape))
        output_list = model(input_list)
        output_dict = output_list[0]
        # print("pass: {:.3f}s".format(time.time() - tic))
        # for k, v in output_dict.items():
        #     print("key:{}, value:{}".format(k, v))

    # 4. Visualization: move outputs back to the CPU
    out_boxes = output_dict["boxes"].cpu()
    out_scores = output_dict["scores"].cpu()
    out_labels = output_dict["labels"].cpu()

    out_masks = output_dict["masks"].cpu()

    num_boxes = out_boxes.shape[0]
    max_vis = 40
    thres = 0.5
    # Binarise the soft masks; squeeze(1) drops the channel dimension but keeps
    # the detection dimension even when there is only a single detection
    masks = (out_masks > 0.5).squeeze(1).numpy()

    for i in range(len(masks)):
        if out_scores[i] < thres:  # skip low-confidence detections
            continue
        rgb_mask = random_colour_masks(masks[i])
        frame = cv2.addWeighted(frame, 1, rgb_mask, 0.5, 0)


    # Uncomment the block below to also draw the detection boxes
    # for idx in range(0, min(num_boxes, max_vis)):
    #     score = out_scores[idx].numpy()  # confidence score
    #     bbox = out_boxes[idx].numpy()    # box coordinates (x1, y1, x2, y2)
    #     class_name = COCO_INSTANCE_CATEGORY_NAMES[out_labels[idx]]  # class name
    #     if score < thres:
    #         continue
    #     x1, y1, x2, y2 = map(int, bbox)
    #     frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)
    #     print("Box:", (x1, y1), (x2, y2))
    #     box_size = str((x2 - x1, y2 - y1))  # box width/height in pixels
    #     frame = cv2.putText(frame, box_size, (x1, y1), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 0, 0))

    cv2.imshow('frame', frame)
    # Press 'q' to stop
    if cv2.waitKey(1) & 0xFF == ord('q'):  # poll for a key press every 1 ms
        break
cap.release()
cv2.destroyAllWindows()

(Figure: Mask R-CNN segmentation result on a video frame)
