Person motion detection with yolov5 in two hundred lines of C++

This post implements motion detection for a single person with yolov5; handling multiple people would require multi-object tracking, which I'll get to when I have time. The idea is object detection plus stillness suppression, in the following steps:
(1) Train a yolov5 model. I didn't train my own here; I just use the official open-source yolov5s.pt.
(2) Run export.py from the yolov5 repository to convert the .pt model to ONNX (a sketch of the command is given after this list).
(3) Write the C++ deployment project for yolov5, covering preprocessing, inference, and postprocessing.
(4) Set up a fixed-length queue that holds the center point of the single person box detected in each frame.
(5) Read the first frame of the video and run the yolov5 person detector on it. The detector may find zero or more people; if the count is not exactly one, skip the frame and move on to the next.
(6) Keep reading frames until the number of frames with a person detection reaches the queue length, at which point the queue is full.
(7) From then on, when processing a frame, pop the front of the queue and push the newly computed box center onto the back.
(8) Compute the distance between the box centers of adjacent queue elements. If all of these distances exceed a threshold, the target has been moving the whole time, so show the motion box; otherwise show nothing.

In practice, the queue length and the movement threshold need tuning. I use a queue length of 5 and a movement threshold of the detection box diagonal divided by 100 (e.g. for a 200×100 box the diagonal is about 224 px, so the threshold is about 2.2 px per frame). The code below basically achieves real-time motion detection, though I have no idea what it's useful for, haha; I just wrote it for fun while stuck at home during the pandemic.
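For step (2), the conversion is roughly a one-liner run from the yolov5 repository root; the exact flags vary between yolov5 versions (older versions export ONNX by default without --include), so treat this as a sketch:

python export.py --weights yolov5s.pt --include onnx

The code below loads the model as "yolov5s-f32.onnx", so rename the exported file accordingly.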

#include <cmath>
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>


#define DEBUG  // comment out to silence the queue debug printing


// Constants
const float INPUT_WIDTH = 640.0;        // model input width
const float INPUT_HEIGHT = 640.0;       // model input height
const float SCORE_THRESHOLD = 0.6;      // class-score threshold
const float NMS_THRESHOLD = 0.25;       // NMS IoU threshold
const float CONFIDENCE_THRESHOLD = 0.6; // objectness confidence threshold

// The 80 COCO class names, in the order of the yolov5 output channels
const std::vector<std::string> class_name = {
	"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
	"fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
	"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
	"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
	"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
	"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
	"potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
	"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
	"hair drier", "toothbrush" };

const int QUEUE_SIZE = 5;  // length of the center-point history queue


// Draw the class label above a detection box
void draw_label(cv::Mat& input_image, std::string label, int left, int top)
{
	int baseLine;
	cv::Size label_size = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.7, 1, &baseLine);
	top = std::max(top, label_size.height);
	cv::putText(input_image, label, cv::Point(left, top + label_size.height), cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(0, 255, 255), 1);
}


// Preprocessing: build the input blob and run a forward pass
std::vector<cv::Mat> preprocess(cv::Mat& input_image, cv::dnn::Net& net)
{
	// Scale pixels to [0,1], resize to 640x640 (plain stretch, no letterbox) and swap BGR->RGB
	cv::Mat blob;
	cv::dnn::blobFromImage(input_image, blob, 1. / 255., cv::Size(INPUT_WIDTH, INPUT_HEIGHT), cv::Scalar(), true, false);

	net.setInput(blob);

	std::vector<cv::Mat> preprocess_image;
	net.forward(preprocess_image, net.getUnconnectedOutLayersNames());

	return preprocess_image;
}


// Postprocessing: decode the raw output, apply NMS, and keep boxes of the requested class
std::vector<cv::Rect> postprocess(std::vector<cv::Mat>& preprocess_image, cv::Mat& output_image, int class_index)
{
	std::vector<int> class_ids;
	std::vector<float> confidences;
	std::vector<cv::Rect> boxes;
	std::vector<cv::Rect> boxes_nms;

	// Scale factors to map boxes from the 640x640 input back to the original image
	float x_factor = output_image.cols / INPUT_WIDTH;
	float y_factor = output_image.rows / INPUT_HEIGHT;

	float* data = (float*)preprocess_image[0].data;

	const int dimensions = 85; // cx, cy, w, h, objectness + 80 class scores
	const int rows = 25200;    // 3 anchors x (80*80 + 40*40 + 20*20) grid cells at 640x640
	for (int i = 0; i < rows; ++i)
	{
		float confidence = data[4];
		if (confidence >= CONFIDENCE_THRESHOLD)
		{
			float* classes_scores = data + 5;
			cv::Mat scores(1, class_name.size(), CV_32FC1, classes_scores);
			cv::Point class_id;
			double max_class_score;
			cv::minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
			if (max_class_score > SCORE_THRESHOLD)
			{
				confidences.push_back(confidence);
				class_ids.push_back(class_id.x);

				// Convert center/size format to the top-left corner format used by cv::Rect
				float cx = data[0];
				float cy = data[1];
				float w = data[2];
				float h = data[3];
				int left = int((cx - 0.5 * w) * x_factor);
				int top = int((cy - 0.5 * h) * y_factor);
				int width = int(w * x_factor);
				int height = int(h * y_factor);
				boxes.push_back(cv::Rect(left, top, width, height));
			}
		}
		data += dimensions;
	}

	std::vector<int> indices;
	cv::dnn::NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, indices);
	for (size_t i = 0; i < indices.size(); i++)
	{
		int idx = indices[i];
		if (class_ids[idx] == class_index)
		{
			cv::Rect box = boxes[idx];
			boxes_nms.push_back(box);

			std::string label = cv::format("%.2f", confidences[idx]);
			label = class_name[class_ids[idx]] + ":" + label;
			draw_label(output_image, label, box.x, box.y);
		}
	}

	return boxes_nms;
}


// Center point of a detection box
cv::Point get_center(cv::Rect detection)
{
	return cv::Point(detection.x + detection.width / 2, detection.y + detection.height / 2);
}


// Euclidean distance between two points
float get_distance(cv::Point p1, cv::Point p2)
{
	return std::sqrt(std::pow(p1.x - p2.x, 2) + std::pow(p1.y - p2.y, 2));
}



int main(int argc, char** argv)
{
	cv::VideoCapture capture(0);
	cv::Mat frame;
	cv::dnn::Net net = cv::dnn::readNet("yolov5s-f32.onnx");

	int frame_num = 0;                              // number of frames so far with exactly one person
	std::vector<cv::Point> detection_center_queue;  // fixed-length queue of box centers

	while (cv::waitKey(1) < 0)
	{
		capture >> frame;
		if (frame.empty())
			break;

		std::cout << "******************************************************************* frame_num: " << frame_num << std::endl;

		cv::Mat image = frame.clone();
		std::vector<cv::Mat> preprocess_image = preprocess(image, net);

		// Class index 0 is "person"
		std::vector<cv::Rect> detections = postprocess(preprocess_image, image, 0);

		// Only frames with exactly one person feed the queue; all other frames are just displayed
		if (detections.size() == 1)
		{
			cv::Rect detection = detections[0];

			if (frame_num < QUEUE_SIZE)
			{
				// Queue not full yet: just append the new center
				cv::Point detection_center = get_center(detection);
				detection_center_queue.push_back(detection_center);
			}
			else
			{
				// Queue full: pop the oldest center and push the new one
				detection_center_queue.erase(detection_center_queue.begin());
				cv::Point detection_center = get_center(detection);
				detection_center_queue.push_back(detection_center);

				// Distances between centers of adjacent frames in the window
				std::vector<float> distances;
				for (size_t i = 1; i < detection_center_queue.size(); i++)
				{
					cv::Point detection_center_old = detection_center_queue[i - 1];
					cv::Point detection_center_new = detection_center_queue[i];

					float distance = get_distance(detection_center_new, detection_center_old);
					distances.push_back(distance);
					std::cout << distance << std::endl;
				}

				// Draw the box only if every frame-to-frame displacement exceeds the threshold,
				// i.e. the person has been moving over the whole window
				bool show_box = true;
				for (size_t i = 0; i < distances.size(); i++)
				{
					// Threshold: box diagonal / 100 (scale-invariant); a fixed pixel value also works
					float moving_threshold = std::sqrt(std::pow(detection.width, 2) + std::pow(detection.height, 2)) / 100;
					//float moving_threshold = 5;
					if (distances[i] < moving_threshold)
						show_box = false;
				}

				if (show_box)
					cv::rectangle(image, cv::Point(detection.x, detection.y), \
						cv::Point(detection.x + detection.width, detection.y + detection.height), cv::Scalar(0, 0, 255), 2);
			}

#ifdef DEBUG
			std::cout << "detection_center_queue:" << std::endl;
			for (size_t i = 0; i < detection_center_queue.size(); i++)
			{
				std::cout << detection_center_queue[i] << " ";
			}
			std::cout << std::endl;
#endif // DEBUG

			frame_num++;
		}

		cv::imshow("output", image);
	}

	capture.release();
	return 0;
}
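To build and run, link against OpenCV (4.x with the dnn module) and keep yolov5s-f32.onnx in the working directory with a webcam attached. Assuming pkg-config can find your OpenCV 4 install (adjust paths otherwise), a minimal build line is:

g++ -O2 -std=c++11 -o motion_detect main.cpp `pkg-config --cflags --libs opencv4`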

Follow-up: a multi-object tracking version with SORT + yolov5.
